/* * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions.
*/
if (!_heap.is_allocating(cast_from_oop<uintptr_t>(obj))) { // An object that isn't allocating, is visible from GC tracing. Such // stack chunks require barriers. returntrue;
}
if (!ZAddress::is_good_or_null(*cont_addr)) { // If a chunk is allocated after a GC started, but before relocate start // we can have an allocating chunk that isn't deeply good. That means that // the contained oops might be bad and require GC barriers. returntrue;
}
// The chunk is allocating and its pointers are good. This chunk needs no // GC barriers returnfalse;
}
// Expand and retry allocation
MetaWord* const result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); if (result != NULL) { return result;
}
// As a last resort, try a critical allocation, riding on a synchronous full GC return MetaspaceCriticalAllocation::allocate(loader_data, size, mdtype);
}
void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // Synchronous collection requests arriving here are deliberately ignored,
  // because ZGC cannot drive a GC cycle from inside the VM thread. This is
  // considered benign: the only causes expected at this entry point are the
  // heap dumper and the heap inspector, and neither actually needs a GC to
  // have run. Their heap iterations may just be slightly less accurate,
  // possibly observing objects that a GC would otherwise have reclaimed.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");

  const bool valid_cause = (cause == GCCause::_heap_dump) ||
                           (cause == GCCause::_heap_inspection);
  guarantee(valid_cause, "Invalid cause");
}
void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // ZGC does not support this operation; control must never reach here.
  ShouldNotReachHere();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.