/* * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  // Serialize all parallel allocations so the block-offset table stays
  // consistent with the space's allocation pointer.
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate" because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread at a safepoint — neither of
  // which necessarily holds here.  One alternative would be a flag in
  // ContiguousSpace meaning "locking at safepoint taken care of", set and
  // reset here; for now calling par_allocate under the lock will do,
  // perhaps to be replaced in the future by some lock-free coordination.
  HeapWord* result = ContiguousSpace::par_allocate(size);
  if (result != NULL) {
    // Record the newly allocated block so block-start queries stay correct.
    _offsets.alloc_block(result, size);
  }
  return result;
}
public:
// Decides how much dead space may be tolerated at the bottom of the given
// space before compaction becomes worthwhile, honoring the space's
// allowed-dead ratio and the MarkSweepAlwaysCompactCount forced-compaction
// cadence.
DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
  const size_t dead_ratio = _space->allowed_dead_ratio();
  _active = dead_ratio > 0;
  if (!_active) {
    return;
  }
  assert(!UseG1GC, "G1 should not be using dead space");
  // We allow some amount of garbage towards the bottom of the space, so
  // we don't start compacting before there is a significant gain to be
  // made.  Occasionally we want to ensure a full compaction, which is
  // determined by the MarkSweepAlwaysCompactCount parameter.
  if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) == 0) {
    // Forced full compaction: tolerate no dead space at all.
    _active = false;
  } else {
    _allowed_deadspace_words = (_space->capacity() * dead_ratio / 100) / HeapWordSize;
  }
}
// NOTE(review): fragment of a larger sweep/compaction loop — the enclosing
// function begins and ends outside this chunk, so cur_obj/space come from
// context not visible here.  Left byte-identical.
if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) { // we have a chunk of the space which hasn't moved and we've reinitialized // the mark word during the previous pass, so we can't use is_gc_marked for // the traversal.
HeapWord* prev_obj = NULL;
template <class SpaceType> inlinevoid CompactibleSpace::clear_empty_region(SpaceType* space) { // Let's remember if we were empty before we did the compaction. bool was_empty = space->used_region().is_empty(); // Reset space after compaction is complete
space->reset_after_compaction(); // We do this clear, below, since it has overloaded meanings for some // space subtypes. For example, OffsetTableContigSpace's that were // compacted into will have had their offset table thresholds updated // continuously, but those that weren't need to have their thresholds // re-initialized. Also mangles unused area for debugging. if (space->used_region().is_empty()) { if (!was_empty) space->clear(SpaceDecorator::Mangle);
} else { if (ZapUnusedHeapArea) space->mangle_unused_area();
}
} #endif// INCLUDE_SERIALGC
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.