/*
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// A "CollectedHeap" is an implementation of a java heap for HotSpot. This // is an abstract class: there may be many different kinds of heaps. This // class defines the functions that a heap must implement, and contains // infrastructure common to all heaps.
class WorkerTask;
class AdaptiveSizePolicy;
class BarrierSet;
class GCHeapLog;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class ReservedHeapSpace;
class SoftRefPolicy;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkerThreads;
class nmethod;
class ParallelObjectIteratorImpl : public CHeapObj<mtGC> {
public:
  virtual ~ParallelObjectIteratorImpl() {}
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
};
// User facing parallel object iterator. This is a StackObj, which ensures that
// the _impl is allocated and deleted in the scope of this object. This ensures
// the life cycle of the implementation is as required by ThreadsListHandle,
// which is sometimes used by the root iterators.
class ParallelObjectIterator : public StackObj {
  ParallelObjectIteratorImpl* _impl;

public:
  ParallelObjectIterator(uint thread_num);
  ~ParallelObjectIterator();
  void object_iterate(ObjectClosure* cl, uint worker_id);
};

class CollectedHeap : public CHeapObj<mtGC> {
 private:
// Historic gc information
size_t _capacity_at_last_gc;
size_t _used_at_last_gc;
  // First, set it to java_lang_Object.
  // Then, set it to FillerObject after the FillerObject_klass loading is complete.
  static Klass* _filler_object_klass;
 protected:
  // Not used by all GCs
MemRegion _reserved;
bool _is_gc_active;
  // (Minimum) Alignment reserve for TLABs and PLABs.
  static size_t _lab_alignment_reserve;
  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;
static size_t _stack_chunk_max_size; // 0 for no limit
  // Last time the whole heap has been examined in support of RMI
  // MaxObjectInspectionAge.
  // This timestamp must be monotonically non-decreasing to avoid
  // time-warp warnings.
jlong _last_whole_heap_examined_time_ns;
  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
GCCause::Cause _gc_cause;
GCCause::Cause _gc_lastcause;
PerfStringVariable* _perf_gc_cause;
PerfStringVariable* _perf_gc_lastcause;
// Constructor
CollectedHeap();
  // Create a new tlab. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
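  // Illustrative sketch only (not part of this interface): an override is
  // expected to honor the min_size/requested_size/actual_size contract, e.g.
  // (par_allocate_from_eden() is a hypothetical helper):
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     size_t size = requested_size;              // try the ergonomic size first
  //     HeapWord* mem = par_allocate_from_eden(size);
  //     if (mem == NULL && min_size < requested_size) {
  //       size = min_size;                         // fall back to the minimum
  //       mem = par_allocate_from_eden(size);
  //     }
  //     if (mem != NULL) {
  //       *actual_size = size;                     // report the size actually given
  //     }
  //     return mem;                                // NULL means allocation failed
  //   }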
  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();
  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be
  // called to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 protected:
  // Get a pointer to the derived heap object.  Used to implement
  // derived class heap() functions rather than being called directly.
  template<typename T>
  static T* named_heap(Name kind) {
    CollectedHeap* heap = Universe::heap();
    assert(heap != NULL, "Uninitialized heap");
    assert(kind == heap->kind(), "Heap kind %u should be %u",
           static_cast<uint>(heap->kind()), static_cast<uint>(kind));
    return static_cast<T*>(heap);
  }
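  // Illustrative sketch only: a concrete heap typically builds its typed
  // heap() accessor on named_heap(), e.g. (modeled on the G1 sources):
  //
  //   G1CollectedHeap* G1CollectedHeap::heap() {
  //     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
  //   }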
  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;
  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize();
  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}
  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}
// Return "true" if the part of the heap that allocates Java // objects has reached the maximal committed limit that it can // reach, without a garbage collection. virtualbool is_maximal_no_gc() const = 0;
  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;
// Returns "TRUE" iff "p" points into the committed areas of the heap. // This method can be expensive so avoid using it in performance critical // code. virtualbool is_in(constvoid* p) const = 0;
  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }
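  // Illustrative sketch only: a collector that retires a partially used region
  // can plug the unused tail so the heap stays parsable (top/end are
  // hypothetical HeapWord* bounds of the dead gap):
  //
  //   size_t words = pointer_delta(end, top);
  //   if (words >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_objects(top, words);  // one or more filler objects
  //   }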
  // Some heaps may be in an unparseable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
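  // Illustrative sketch only: a heap-walking VM operation is expected to run
  // at a safepoint and make the heap parsable first, e.g.:
  //
  //   // inside a VM_Operation::doit(), i.e. with the world stopped
  //   Universe::heap()->ensure_parsability(false /* retire_tlabs */);
  //   Universe::heap()->object_iterate(&my_closure);  // my_closure: an ObjectClosure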
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;
  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;
virtual size_t max_tlab_size() const;
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // If a GC uses a stack watermark barrier, the stack processing is lazy, concurrent,
  // incremental and cooperative. In order for that to work well, mechanisms that stop
  // another thread might want to ensure its roots are in a sane state.
  virtual bool uses_stack_watermark_barrier() const { return false; }
  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;
  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;
  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);
  // Return true, if accesses to the object would require barriers.
  // This is used by continuations to copy chunks of a thread stack into StackChunk
  // object or out of a StackChunk object back into the thread stack.  These chunks
  // may contain references to objects.  It is crucial that the GC does not attempt
  // to traverse the object while we modify it, because its structure (oopmap) is
  // changed when stack chunks are stored into it.
  // StackChunk objects may be reused, the GC must not assume that a StackChunk
  // object is always a freshly allocated object.
  virtual bool requires_barriers(stackChunkOop obj) const = 0;
// Returns "true" iff there is a stop-world GC in progress. (I assume // that it should answer "false" for the concurrent part of a concurrent // collector -- dld). bool is_gc_active() const { return _is_gc_active; }
  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }
  // Increment total number of GC collections (started)
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }
 public:
  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  virtual void keep_alive(oop obj) {}
  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;
  // Returns the longest time (in ms) that has elapsed since the last
  // time that the whole heap has been examined by a garbage collection.
  jlong millis_since_last_whole_heap_examined();
  // GC should call this when the next whole heap analysis has completed to
  // satisfy above requirement.
  void record_whole_heap_examined_timestamp();
 private:
  // Generate any dumps preceding or following a full gc
  void full_gc_dump(GCTimer* timer, bool before);
 public:
  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const;
  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }
  // Return true if concurrent gc control via WhiteBox is supported by
  // this collector.  The default implementation returns false.
  virtual bool supports_concurrent_gc_breakpoints() const;
  // Workers used in non-GC safepoints for parallel safepoint cleanup. If this
  // method returns NULL, cleanup tasks are done serially in the VMThread. See
  // `SafepointSynchronize::do_cleanup_tasks` for details.
  // GCs using a GC worker thread pool inside GC safepoints may opt to share
  // that pool with non-GC safepoints, avoiding creating extraneous threads.
  // Such sharing is safe, because GC safepoints and non-GC safepoints never
  // overlap. For example, `G1CollectedHeap::workers()` (for GC safepoints) and
  // `G1CollectedHeap::safepoint_workers()` (for non-GC safepoints) return the
  // same thread-pool.
  virtual WorkerThreads* safepoint_workers() { return NULL; }
  // Support for object pinning. This is used by JNI Get*Critical()
  // and Release*Critical() family of functions. If supported, the GC
  // must guarantee that pinned objects never move.
  virtual bool supports_object_pinning() const;
  virtual oop pin_object(JavaThread* thread, oop obj);
  virtual void unpin_object(JavaThread* thread, oop obj);
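  // Illustrative sketch only: callers must pair these around the window in
  // which raw interior pointers are handed out, e.g.:
  //
  //   oop pinned = Universe::heap()->pin_object(thread, obj);
  //   // ... native code may access the object's body via a raw pointer ...
  //   Universe::heap()->unpin_object(thread, pinned);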
  // Is the given object inside a CDS archive area?
  virtual bool is_archived_object(oop object) const;
  // Support for loading objects from CDS archive into the heap
  // (usually as a snapshot of the old generation).
  virtual bool can_load_archived_objects() const { return false; }
  virtual HeapWord* allocate_loaded_archive_space(size_t size) { return NULL; }
  virtual void complete_loaded_archive_space(MemRegion archive_space) { }
  virtual bool is_oop(oop object) const;

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();
  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT
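  // Illustrative sketch only: a copying collector can inject test failures at
  // its promotion site (forward_to_old() is a hypothetical helper):
  //
  //   #ifndef PRODUCT
  //   if (promotion_should_fail()) {
  //     return NULL;   // pretend the old gen is full, exercising failure paths
  //   }
  //   #endif
  //   return forward_to_old(obj);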
};
// Class to set and reset the GC cause for a CollectedHeap.
class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};