/*
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_GC_G1_HEAPREGION_HPP
#define SHARE_GC_G1_HEAPREGION_HPP

// Includes covering the types used below (the exact upstream set may differ).
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1SurvRateGroup.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"

class G1CardSetConfiguration;
class G1CollectedHeap;
class G1CMBitMap;
class G1Predictions;
class HeapRegionRemSet;
class HeapRegion;
class HeapRegionSetBase;
class nmethod;
// sentinel value for hrm_index
#define G1_NO_HRM_INDEX ((uint) -1)
// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// Each heap region is self-contained. top() and end() can never
// be set beyond the end of the region. For humongous objects,
// the first region is a StartsHumongous region. If the humongous
// object is larger than a heap region, the following regions will
// be of type ContinuesHumongous. In this case the top() of the
// StartsHumongous region and all ContinuesHumongous regions except
// the last will point to their own end. The last ContinuesHumongous
// region may have top() equal to the end of the object if there isn't
// room for filler objects to pad out to the end of the region.
class HeapRegion : public CHeapObj<mtGC> {
  friend class VMStructs;
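// Worked example (illustrative numbers, not taken from this file): with
// 4 MB regions, a 10 MB humongous object spans three regions. Region 0 is
// StartsHumongous, regions 1 and 2 are ContinuesHumongous; top() of regions
// 0 and 1 points to their end(), while top() of region 2 ends up 2 MB into
// the region, at or shortly past the object's end depending on whether
// there is room for filler objects.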
HeapWord* const _bottom;
HeapWord* const _end;
HeapWord* volatile _top;
G1BlockOffsetTablePart _bot_part;
// When we need to retire an allocation region, while other threads
// are also concurrently trying to allocate into it, we typically
// allocate a dummy object at the end of the region to ensure that
// no more allocations can take place in it. However, sometimes we
// want to know where the end of the last "real" object we allocated
// into the region was and this is what this field keeps track of.
HeapWord* _pre_dummy_top;
// See the comment above in the declaration of _pre_dummy_top for an
// explanation of what it is.
void set_pre_dummy_top(HeapWord* pre_dummy_top) {
assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
_pre_dummy_top = pre_dummy_top;
}
HeapWord* pre_dummy_top() const { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; }
void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
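// Retirement sketch (illustrative; the actual retiring code lives in the
// allocator, not in this header): before plugging the tail of a region with
// a dummy object, a thread records where the real data ends:
//   region->set_pre_dummy_top(region->top());
//   // ... then allocate and format a filler object covering [top(), end())
// so that later accounting can tell real allocations from the filler.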
// Returns true iff the heap region contains the given address as part
// of an allocated object. This may be a potentially expensive check,
// so we restrict its use to assertion checks only.
bool is_in(const void* p) const {
  return is_in_reserved(p);
}
bool is_in(oop obj) const {
  return is_in((void*)obj);
}
// Returns true iff the given reserved memory of the space contains the
// given address.
bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
// Try to allocate at least min_word_size and up to desired_word_size from this region.
// Returns NULL if not possible, otherwise sets actual_word_size to the amount of
// space allocated.
// This version assumes that all allocation requests to this HeapRegion are properly
// synchronized.
inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
// Try to allocate at least min_word_size and up to desired_word_size from this HeapRegion.
// Returns NULL if not possible, otherwise sets actual_word_size to the amount of
// space allocated.
// This version synchronizes with other calls to par_allocate_impl().
inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
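// Sketch of the lock-free shape par_allocate_impl() implies (illustrative;
// the real bodies live in the corresponding .inline.hpp, and the exact
// policy for shrinking from desired to min is an assumption here):
//   do {
//     HeapWord* obj = top();
//     size_t avail = pointer_delta(end(), obj);
//     size_t want  = MIN2(desired_word_size, avail);
//     if (want < min_word_size) return NULL;
//     if (Atomic::cmpxchg(&_top, obj, obj + want) == obj) {
//       *actual_word_size = want;
//       return obj;
//     }
//   } while (true);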
// Returns the address of the block reaching into or starting at addr.
HeapWord* block_start(const void* addr) const;
HeapWord* block_start(const void* addr, HeapWord* const pb) const;
void object_iterate(ObjectClosure* blk);
// At the given address create an object with the given size. If the region
// is old the BOT will be updated if the object spans a threshold.
void fill_with_dummy_object(HeapWord* address, size_t word_size, bool zap = true);
// Create objects in the given range. The BOT will be updated if needed and
// the created objects will have their header marked to show that they are
// dead.
void fill_range_with_dead_objects(HeapWord* start, HeapWord* end);
// All allocations are done without updating the BOT. The BOT
// needs to be kept in sync for old generation regions and
// this is done by explicit updates when crossing thresholds.
inline HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
inline HeapWord* allocate(size_t word_size);
inline HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
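// Usage sketch (illustrative caller, not part of this header): a PLAB-style
// caller asks for a desired size but accepts anything down to its minimum:
//   size_t actual = 0;
//   HeapWord* buf = region->par_allocate(min_words, desired_words, &actual);
//   if (buf != NULL) {
//     // [buf, buf + actual) is now owned by this thread.
//   }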
// Update BOT if this obj is the first entering a new card (i.e. crossing the card boundary).
inline void update_bot_for_obj(HeapWord* obj_start, size_t obj_size);
// Update heap region that has been compacted to be consistent after Full GC.
void reset_compacted_after_full_gc(HeapWord* new_top);
// Update skip-compacting heap region to be consistent after Full GC.
void reset_skip_compacting_after_full_gc();
// All allocated blocks are occupied by objects in a HeapRegion.
bool block_is_obj(const HeapWord* p, HeapWord* pb) const;
// Returns whether the given object is dead based on the given parsable_bottom (pb).
// For an object to be considered dead it must be below pb and scrubbed.
bool is_obj_dead(oop obj, HeapWord* pb) const;
// Returns the object size for all valid block starts. If parsable_bottom (pb)
// is given, calculates the block size based on that parsable_bottom, not the
// current value of this HeapRegion.
size_t block_size(const HeapWord* p) const;
size_t block_size(const HeapWord* p, HeapWord* pb) const;
// Scans through the region using the bitmap to determine what
// objects to call size_t ApplyToMarkedClosure::apply(oop) for.
template<typename ApplyToMarkedClosure>
inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
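// Illustrative closure shape (hypothetical type; the assumed contract here is
// that apply() returns the object's size in words so the scan can advance):
//   struct LiveBytesClosure {
//     size_t _live_words = 0;
//     size_t apply(oop obj) {
//       size_t words = obj->size();
//       _live_words += words;
//       return words;
//     }
//   };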
// Update the BOT for the entire region - assumes that all objects are parsable
// and contiguous for this region.
void update_bot();
private:
// The remembered set for this region.
HeapRegionRemSet* _rem_set;
// Cached index of this region in the heap region sequence.
const uint _hrm_index;
HeapRegionType _type;
// For a humongous region, region in which it starts.
HeapRegion* _humongous_start_region;
static const uint InvalidCSetIndex = UINT_MAX;
// The index in the optional regions array, if this region
// is considered optional during a mixed collection.
uint _index_in_opt_cset;
// Fields used by the HeapRegionSetBase class and subclasses.
HeapRegion* _next;
HeapRegion* _prev;
#ifdef ASSERT
HeapRegionSetBase* _containing_set;
#endif // ASSERT
// The start of the unmarked area. The unmarked area extends from this
// word until the top and/or end of the region, and is the part
// of the region for which no marking was done, i.e. objects may
// have been allocated in this part since the last mark phase.
HeapWord* volatile _top_at_mark_start;
// The area above this limit is fully parsable. This limit
// is equal to bottom except from Remark and until the region has been
// scrubbed concurrently. The scrubbing ensures that all dead objects (with
// possibly unloaded classes) have been replaced with filler objects that
// are parsable. Below this limit the marking bitmap must be used to
// determine size and liveness.
HeapWord* volatile _parsable_bottom;
// Amount of dead data in the region.
size_t _garbage_bytes;
inline void init_top_at_mark_start();
// Data for young region survivor prediction.
uint _young_index_in_cset;
G1SurvRateGroup* _surv_rate_group;
int _age_index;
// Cached attributes used in the collection set policy information
// The calculated GC efficiency of the region.
double _gc_efficiency;
// Iterate over the references covered by the given MemRegion in a humongous
// object and apply the given closure to them.
// Humongous objects are allocated directly in the old-gen. So we need special
// handling for concurrent processing encountering an in-progress allocation.
// Returns the address after the last actually scanned or NULL if the area could
// not be scanned (that should only happen when invoked concurrently with the
// mutator).
template <class Closure, bool in_gc_pause>
inline HeapWord* do_oops_on_memregion_in_humongous(MemRegion mr, Closure* cl);
public:
// If this region is a member of a HeapRegionManager, the index in that
// sequence, otherwise G1_NO_HRM_INDEX.
uint hrm_index() const { return _hrm_index; }
// Initializing the HeapRegion not only resets the data structure, but also
// resets the BOT for that heap region.
// The default value for clear_space means that we will do the clearing if
// there's clearing to be done ourselves. We also always mangle the space.
void initialize(bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
// Returns whether a field is in the same region as the obj it points to.
template <typename T>
static bool is_in_same_region(T* p, oop obj) {
  assert(p != NULL, "p can't be NULL");
  assert(obj != NULL, "obj can't be NULL");
  return (((uintptr_t) p ^ cast_from_oop<uintptr_t>(obj)) >> LogOfHRGrainBytes) == 0;
}
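// Why the XOR works (worked example with illustrative numbers): regions are
// 2^LogOfHRGrainBytes bytes and aligned, so two addresses in the same region
// agree on all bits above LogOfHRGrainBytes. With LogOfHRGrainBytes == 22
// (4 MB regions), p = 0x7f0000401000 and obj = 0x7f00005ff000 give
// p ^ obj = 0x1fe000, and 0x1fe000 >> 22 == 0, so they share a region;
// any differing high-order bit would survive the shift and yield non-zero.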
// It sets up the heap region size (GrainBytes / GrainWords), as well as
// other related fields that are based on the heap region size
// (LogOfHRGrainBytes / CardsPerRegion). All those fields are considered
// constant throughout the JVM's execution, therefore they should only be set
// up once during initialization time.
static void setup_heap_region_size(size_t max_heap_size);
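// Worked example (based on G1's documented default ergonomics; the exact
// policy lives in the paired .cpp, not here): without -XX:G1HeapRegionSize,
// G1 aims for roughly 2048 regions and rounds to a power of two within
// [1 MB, 32 MB]. A 16 GB max heap therefore gets 16 GB / 2048 = 8 MB
// regions, i.e. GrainBytes = 8 MB and LogOfHRGrainBytes = 23.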
// An upper bound on the number of live bytes in the region.
size_t live_bytes() const {
  return used() - garbage_bytes();
}
// A lower bound on the amount of garbage bytes in the region.
size_t garbage_bytes() const { return _garbage_bytes; }
// Return the amount of bytes we'll reclaim if we collect this
// region. This includes not only the known garbage bytes in the
// region but also any unallocated space in it, i.e., [top, end),
// since it will also be reclaimed if we collect the region.
size_t reclaimable_bytes() {
size_t known_live_bytes = live_bytes();
  assert(known_live_bytes <= capacity(), "sanity");
  return capacity() - known_live_bytes;
}
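// Worked example (illustrative numbers): in an 8 MB region with
// used() == 6 MB and garbage_bytes() == 2 MB, live_bytes() == 4 MB and
// reclaimable_bytes() == 8 MB - 4 MB == 4 MB: the 2 MB of known garbage
// plus the 2 MB of unallocated space in [top, end).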
// Get the start of the unmarked area in this region.
HeapWord* top_at_mark_start() const;
void set_top_at_mark_start(HeapWord* value);
// Retrieve parsable bottom; since it may be modified concurrently, outside a
// safepoint the _acquire method must be used.
HeapWord* parsable_bottom() const;
HeapWord* parsable_bottom_acquire() const;
void reset_parsable_bottom();
// Note the start or end of marking. This tells the heap region
// that the collector is about to start or has finished (concurrently)
// marking the heap.
// Notify the region that concurrent marking is starting. Initialize
// all fields related to the next marking info.
inline void note_start_of_marking();

// Notify the region that concurrent marking has finished. Passes the number of
// bytes between bottom and TAMS.
inline void note_end_of_marking(size_t marked_bytes);

// Notify the region that scrubbing has completed.
inline void note_end_of_scrubbing();

// Notify the region that the (corresponding) bitmap has been cleared.
inline void reset_top_at_mark_start();
// During the concurrent scrubbing phase, can there be any areas with unloaded
// classes or dead objects in this region?
// This set only includes old and open archive regions - humongous regions only
// contain a single object which is either dead or live, contents of closed archive
// regions never die (so the region is always contiguous), and young regions are
// never even considered during concurrent scrub.
bool needs_scrubbing() const { return is_old() || is_open_archive(); }

// Same question as above, during full gc. Full gc needs to scrub any region that
// might be skipped for compaction. This includes young generation regions as the
// region relabeling to old happens later than scrubbing.
bool needs_scrubbing_during_full_gc() const { return is_young() || needs_scrubbing(); }
// A pinned region contains objects which are not moved by garbage collections.
// Humongous regions and archive regions are pinned.
bool is_pinned() const { return _type.is_pinned(); }
// An archive region is a pinned region, also tagged as old, which
// should not be marked during mark/sweep. This allows the address
// space to be shared by JVM instances.
bool is_archive() const { return _type.is_archive(); }
bool is_open_archive() const { return _type.is_open_archive(); }
bool is_closed_archive() const { return _type.is_closed_archive(); }
// For a humongous region, region in which it starts.
HeapRegion* humongous_start_region() const {
  return _humongous_start_region;
}
// Makes the current region be a "starts humongous" region, i.e., // the first region in a series of one or more contiguous regions // that will contain a single "humongous" object. // // obj_top : points to the top of the humongous object. // fill_size : size of the filler object at the end of the region series. void set_starts_humongous(HeapWord* obj_top, size_t fill_size);
// Makes the current region be a "continues humongous' // region. first_hr is the "start humongous" region of the series // which this region will be part of. void set_continues_humongous(HeapRegion* first_hr);
// Unsets the humongous-related fields on the region.
void clear_humongous();
void set_rem_set(HeapRegionRemSet* rem_set) { _rem_set = rem_set; }

// If the region has a remembered set, return a pointer to it.
HeapRegionRemSet* rem_set() const {
  return _rem_set;
}
inline bool in_collection_set() const;
void prepare_remset_for_scan();
// Methods used by the HeapRegionSetBase class and subclasses.
// Getter and setter for the next and prev fields used to link regions into
// linked lists.
void set_next(HeapRegion* next) { _next = next; }
HeapRegion* next() { return _next; }

void set_prev(HeapRegion* prev) { _prev = prev; }
HeapRegion* prev() { return _prev; }
// Every region added to a set is tagged with a reference to that
// set. This is used for doing consistency checking to make sure that
// the contents of a set are as they should be and it's only
// available in non-product builds.
#ifdef ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) {
  assert((containing_set != NULL && _containing_set == NULL) ||
         containing_set == NULL,
         "containing_set: " PTR_FORMAT " "
         "_containing_set: " PTR_FORMAT,
         p2i(containing_set), p2i(_containing_set));

  _containing_set = containing_set;
}

HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) { }

// containing_set() is only used in asserts so there's no reason
// to provide a dummy version of it.
#endif // ASSERT
// Reset the HeapRegion to default values and clear its remembered set.
// If clear_space is true, clear the HeapRegion's memory.
// Callers must ensure this is not called by multiple threads at the same time.
void hr_clear(bool clear_space);
// Clear the card table corresponding to this region.
void clear_cardtable();

// Notify the region that an evacuation failure occurred for an object within this
// region.
void note_evacuation_failure(bool during_concurrent_start);

// Notify the region that we have partially finished processing self-forwarded
// objects during evacuation failure handling.
void note_self_forward_chunk_done(size_t garbage_bytes);

// Determine if an object is in the parsable or the to-be-scrubbed area.
inline static bool obj_in_parsable_area(const HeapWord* addr, HeapWord* pb);
inline static bool obj_in_unparsable_area(oop obj, HeapWord* pb);

// Update the region state after a failed evacuation.
void handle_evacuation_failure();
// Iterate over the objects overlapping the given memory region, applying cl
// to all references in the region. This is a helper for
// G1RemSet::refine_card*, and is tightly coupled with them.
// mr must not be empty. Must be trimmed to the allocated/parsable space in this region.
// This region must be old or humongous.
// Returns the next unscanned address if the designated objects were successfully
// processed, NULL if an unparsable part of the heap was encountered (that should
// only happen when invoked concurrently with the mutator).
template <bool in_gc_pause, class Closure>
inline HeapWord* oops_on_memregion_seq_iterate_careful(MemRegion mr, Closure* cl);
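// Usage sketch (illustrative; refinement is the real caller and handles
// retries itself): a concurrent caller must be prepared for a NULL result:
//   HeapWord* next = r->oops_on_memregion_seq_iterate_careful<false>(mr, &cl);
//   if (next == NULL) {
//     // Raced with an in-progress mutator allocation; defer this card.
//   }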
// Routines for managing a list of code roots (attached to this
// region's RSet) that point into this heap region.
void add_code_root(nmethod* nm);
void add_code_root_locked(nmethod* nm);
void remove_code_root(nmethod* nm);
// Applies blk->do_code_blob() to each of the entries in
// the code roots list for this region.
void code_roots_do(CodeBlobClosure* blk) const;
// Verify that the entries on the code root list for this
// region are live and include at least one pointer into this region.
void verify_code_roots(VerifyOption vo, bool* failures) const;
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "do_heap_region" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionManager;
  friend class G1CollectionSet;
  friend class G1CollectionSetCandidates;

  bool _is_complete;
  void set_incomplete() { _is_complete = false; }

public:
  HeapRegionClosure() : _is_complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool do_heap_region(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool is_complete() { return _is_complete; }
};
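// Illustrative subclass (hypothetical; is_free() is a HeapRegion type query
// not shown in this excerpt): count free regions without terminating early.
//   class CountFreeClosure : public HeapRegionClosure {
//     uint _free;
//   public:
//     CountFreeClosure() : _free(0) {}
//     bool do_heap_region(HeapRegion* r) {
//       if (r->is_free()) {
//         _free++;
//       }
//       return false; // keep iterating over all regions
//     }
//     uint free_regions() const { return _free; }
//   };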
class HeapRegionIndexClosure : public StackObj {
  friend class HeapRegionManager;
  friend class G1CollectionSet;
  friend class G1CollectionSetCandidates;

  bool _is_complete;
  void set_incomplete() { _is_complete = false; }

public:
  HeapRegionIndexClosure() : _is_complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool do_heap_region_index(uint region_index) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool is_complete() { return _is_complete; }
};
#endif // SHARE_GC_G1_HEAPREGION_HPP