/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.
// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
// - Non-nmethods: Non-nmethods like Buffers, Adapters and Runtime Stubs
// - Profiled nmethods: nmethods that are profiled, i.e., those
//   executed at level 2 or 3
// - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
//   executed at level 1 or 4 and native methods
// - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-nmethod code heap getting full, non-nmethod code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and compilation mode there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if tiered mode is enabled and
// ReservedCodeCacheSize >= 240 MB.
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the
// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.
// Forward declarations of types referenced by the CodeCache interface.
class ExceptionCache;
class KlassDepChange;
class OopClosure;
class ShenandoahParallelCodeHeapIterator;
class NativePostCallNop;
static address _low_bound; // Lower bound of CodeHeap addresses static address _high_bound; // Upper bound of CodeHeap addresses staticint _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
static uint8_t _unloading_cycle; // Global state for recognizing old nmethods that need to be unloaded static uint64_t _gc_epoch; // Global state for tracking when nmethods were found to be on-stack static uint64_t _cold_gc_count; // Global state for determining how many GCs are needed before an nmethod is cold static size_t _last_unloading_used; staticdouble _last_unloading_time; static TruncatedSeq _unloading_gc_intervals; static TruncatedSeq _unloading_allocation_rates; staticvolatilebool _unloading_threshold_gc_requested; static nmethod* volatile _unlinked_head;
// CodeHeap management staticvoid initialize_heaps(); // Initializes the CodeHeaps // Check the code heap sizes set by the user via command line staticvoid check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set); // Creates a new heap with the given name and size, containing CodeBlobs of the given type staticvoid add_heap(ReservedSpace rs, constchar* name, CodeBlobType code_blob_type); static CodeHeap* get_code_heap_containing(void* p); // Returns the CodeHeap containing the given pointer, or NULL static CodeHeap* get_code_heap(const CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob static CodeHeap* get_code_heap(CodeBlobType code_blob_type); // Returns the CodeHeap for the given CodeBlobType // Returns the name of the VM option to set the size of the corresponding CodeHeap staticconstchar* get_code_heap_flag_name(CodeBlobType code_blob_type); static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
// Iteration static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap static CodeBlob* first_blob(CodeBlobType code_blob_type); // Returns the first CodeBlob of the given type static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap
// Make private to prevent unsafe calls. Not all CodeBlob*'s are embedded in a CodeHeap. staticbool contains(CodeBlob *p) { fatal("don't call me!"); returnfalse; }
public: // Initialization staticvoid initialize(); static size_t page_size(bool aligned = true, size_t min_pages = 1); // Returns the page size used by the CodeCache
// Allocation/administration static CodeBlob* allocate(int size, CodeBlobType code_blob_type, bool handle_alloc_failure = true, CodeBlobType orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob staticvoid commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled staticint alignment_unit(); // guaranteed alignment of all CodeBlobs staticint alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) staticvoid free(CodeBlob* cb); // frees a CodeBlob staticvoid free_unused_tail(CodeBlob* cb, size_t used); // frees the unused tail of a CodeBlob (only used by TemplateInterpreter::initialize()) staticbool contains(void *p); // returns whether p is included staticbool contains(nmethod* nm); // returns whether nm is included staticvoid blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs staticvoid blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs staticvoid nmethods_do(void f(nmethod* nm)); // iterates over all nmethods staticvoid metadata_do(MetadataClosure* f); // iterates over metadata in alive nmethods
// Lookup static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address static CodeBlob* find_blob_fast(void* start); // Returns the CodeBlob containing the given address static CodeBlob* find_blob_and_oopmap(void* start, int& slot); // Returns the CodeBlob containing the given address staticint find_oopmap_slot_fast(void* start); // Returns a fast oopmap slot if there is any; -1 otherwise static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address static CompiledMethod* find_compiled(void* start);
staticint blob_count(); // Returns the total number of CodeBlobs in the cache staticint blob_count(CodeBlobType code_blob_type); staticint adapter_count(); // Returns the total number of Adapters in the cache staticint adapter_count(CodeBlobType code_blob_type); staticint nmethod_count(); // Returns the total number of nmethods in the cache staticint nmethod_count(CodeBlobType code_blob_type);
// GC support staticvoid verify_oops(); // If any oops are not marked this method unloads (i.e., breaks root links // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading" // to "true" iff some code got unloaded. // "unloading_occurred" controls whether metadata should be cleaned because of class unloading. class UnloadingScope: StackObj {
ClosureIsUnloadingBehaviour _is_unloading_behaviour;
IsUnloadingBehaviour* _saved_behaviour;
// The GC epoch and marking_cycle code below is there to support sweeping // nmethods in loom stack chunks. static uint64_t gc_epoch(); staticbool is_gc_marking_cycle_active(); static uint64_t previous_completed_gc_marking_cycle(); staticvoid on_gc_marking_cycle_start(); staticvoid on_gc_marking_cycle_finish(); staticvoid arm_all_nmethods();
static void clear_inline_caches();              // clear all inline caches
static void cleanup_inline_caches_whitebox();   // clean bad nmethods from inline caches
// Returns true if an own CodeHeap for the given CodeBlobType is available staticbool heap_available(CodeBlobType code_blob_type);
// Returns the CodeBlobType for the given CompiledMethod static CodeBlobType get_code_blob_type(CompiledMethod* cm) { return get_code_heap(cm)->code_blob_type();
}
// Flushing and deoptimization staticvoid flush_dependents_on(InstanceKlass* dependee);
// RedefineClasses support // Flushing and deoptimization in case of evolution staticint mark_dependents_for_evol_deoptimization(); staticvoid mark_all_nmethods_for_evol_deoptimization(); staticvoid flush_evol_dependents(); staticvoid old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN; staticvoid unregister_old_nmethod(CompiledMethod* c) NOT_JVMTI_RETURN;
// Support for fullspeed debugging staticvoid flush_dependents_on_method(const methodHandle& dependee);
// tells how many nmethods have dependencies
static int number_of_nmethods_with_dependencies();
// CodeHeap State Analytics. // interface methods for CodeHeap printing, called by CompileBroker staticvoid aggregate(outputStream *out, size_t granularity); staticvoid discard(outputStream *out); staticvoid print_usedSpace(outputStream *out); staticvoid print_freeSpace(outputStream *out); staticvoid print_count(outputStream *out); staticvoid print_space(outputStream *out); staticvoid print_age(outputStream *out); staticvoid print_names(outputStream *out);
};
// Iterator to iterate over code blobs in the CodeCache. // The relaxed iterators only hold the CodeCache_lock across next calls template <class T, class Filter, bool is_relaxed> class CodeBlobIterator : public StackObj { public: enum LivenessFilter { all_blobs, only_not_unloading };
bool next_impl() { for (;;) { // Walk through heaps as required if (!next_blob()) { if (_heap == _end) { returnfalse;
}
++_heap; continue;
}
// Filter is_unloading as required if (_only_not_unloading) {
CompiledMethod* cm = _code_blob->as_compiled_method_or_null(); if (cm != NULL && cm->is_unloading()) { continue;
}
}
returntrue;
}
}
public:
// Constructs an iterator over the heaps supplied by Filter. If nm is
// non-null, the iterator is positioned at the heap containing that blob.
CodeBlobIterator(LivenessFilter filter, T* nm = nullptr)
  : _only_not_unloading(filter == only_not_unloading)
{
  if (Filter::heaps() == nullptr) {
    // The iterator is supposed to shortcut since we have
    // _heap == _end, but make sure we do not have garbage
    // in other fields as well.
    _code_blob = nullptr;
    return;
  }
  _heap = Filter::heaps()->begin();
  _end  = Filter::heaps()->end();
  // If set to nullptr, initialized by first call to next()
  _code_blob = nm;
  if (nm != nullptr) {
    // NOTE(review): assumes some heap in [_heap, _end) contains nm;
    // the loop does not guard against running past _end.
    while (!(*_heap)->contains_blob(_code_blob)) {
      ++_heap;
    }
    assert((*_heap)->contains_blob(_code_blob), "match not found");
  }
}
// Advance iterator to next blob bool next() { if (is_relaxed) {
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); return next_impl();
} else {
assert_locked_or_safepoint(CodeCache_lock); return next_impl();
}
}
// NOTE(review): the following text is not C++ source -- it appears to be a
// German website disclaimer picked up when this file was extracted, and it
// would not compile as-is. Preserved here, translated, as a comment:
// "The information on this website has been carefully compiled to the best
// of our knowledge. However, neither completeness, nor correctness, nor
// quality of the provided information is guaranteed. Note: the colored
// syntax highlighting is still experimental."