/* * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Return a single chunk to the freelist and adjust accounting. No merge is attempted.
// Expects the Metaspace_lock to be held; the chunk must not already be in any list.
void ChunkManager::return_chunk_simple_locked(Metachunk* c) {
  assert_lock_strong(Metaspace_lock);
  DEBUG_ONLY(c->verify());
  // Add the chunk to the freelist, then clear its used-word counter.
  _chunks.add(c);
  c->reset_used_words();
  // Tracing
  log_debug(metaspace)("ChunkManager %s: returned chunk " METACHUNK_FORMAT ".",
                       _name, METACHUNK_FORMAT_ARGS(c));
}
// Creates a chunk manager with a given name (which is for debug purposes only)
// and an associated space list which will be used to request new chunks from
// (see get_chunk()).
// Fix: the parameter type was misspelled "constchar*" (missing space), which
// does not compile; corrected to "const char*".
ChunkManager::ChunkManager(const char* name, VirtualSpaceList* space_list) :
  _vslist(space_list),
  _name(name),
  _chunks()
{
}
// Given a chunk, split it into a target chunk of a smaller size (higher target level) // and at least one, possible several splinter chunks. // The original chunk must be outside of the freelist and its state must be free. // The splinter chunks are added to the freelist. // The resulting target chunk will be located at the same address as the original // chunk, but it will of course be smaller (of a higher level). // The committed areas within the original chunk carry over to the resulting // chunks. void ChunkManager::split_chunk_and_add_splinters(Metachunk* c, chunklevel_t target_level) {
assert_lock_strong(Metaspace_lock);
assert(c->is_free(), "chunk to be split must be free.");
assert(c->level() < target_level, "Target level must be higher than current level.");
assert(c->prev() == NULL && c->next() == NULL, "Chunk must be outside of any list.");
// Splitting should never fail.
assert(c->level() == target_level, "Sanity");
// The size of the committed portion should not change (subject to the reduced chunk size of course) #ifdef ASSERT if (committed_words_before > c->word_size()) {
assert(c->is_fully_committed(), "Sanity");
} else {
assert(c->committed_words() == committed_words_before, "Sanity");
}
c->verify();
verify_locked();
SOMETIMES(c->vsnode()->verify_locked();) #endif
InternalStats::inc_num_chunk_splits();
}
// On success, returns a chunk of level of <preferred_level>, but at most <max_level>. // The first first <min_committed_words> of the chunk are guaranteed to be committed. // On error, will return NULL. // // This function may fail for two reasons: // - Either we are unable to reserve space for a new chunk (if the underlying VirtualSpaceList // is non-expandable but needs expanding - aka out of compressed class space). // - Or, if the necessary space cannot be committed because we hit a commit limit. // This may be either the GC threshold or MaxMetaspaceSize.
Metachunk* ChunkManager::get_chunk(chunklevel_t preferred_level, chunklevel_t max_level, size_t min_committed_words) {
assert(preferred_level <= max_level, "Sanity");
assert(chunklevel::level_fitting_word_size(min_committed_words) >= max_level, "Sanity");
// First, optimistically look for a chunk which is already committed far enough to hold min_word_size.
// 1) Search best or smaller committed chunks (first attempt): // Start at the preferred chunk size and work your way down (level up). // But for now, only consider chunks larger than a certain threshold - // this is to prevent large loaders (eg boot) from unnecessarily gobbling up // all the tiny splinter chunks lambdas leave around.
Metachunk* c = NULL;
c = _chunks.search_chunk_ascending(preferred_level, MIN2((chunklevel_t)(preferred_level + 2), max_level), min_committed_words);
// 2) Search larger committed chunks: // If that did not yield anything, look at larger chunks, which may be committed. We would have to split // them first, of course. if (c == NULL) {
c = _chunks.search_chunk_descending(preferred_level, min_committed_words);
} // 3) Search best or smaller committed chunks (second attempt): // Repeat (1) but now consider even the tiniest chunks as long as they are large enough to hold the // committed min size. if (c == NULL) {
c = _chunks.search_chunk_ascending(preferred_level, max_level, min_committed_words);
} // if we did not get anything yet, there are no free chunks committed enough. Repeat search but look for uncommitted chunks too: // 4) Search best or smaller chunks, can be uncommitted: if (c == NULL) {
c = _chunks.search_chunk_ascending(preferred_level, max_level, 0);
} // 5) Search a larger uncommitted chunk: if (c == NULL) {
c = _chunks.search_chunk_descending(preferred_level, 0);
}
if (c != NULL) {
UL(trace, "taken from freelist.");
}
// Failing all that, allocate a new root chunk from the connected virtual space. // This may fail if the underlying vslist cannot be expanded (e.g. compressed class space) if (c == NULL) {
c = _vslist->allocate_root_chunk(); if (c == NULL) {
UL(info, "failed to get new root chunk.");
} else {
assert(c->level() == chunklevel::ROOT_CHUNK_LEVEL, "root chunk expected");
UL(debug, "allocated new root chunk.");
}
} if (c == NULL) { // If we end up here, we found no match in the freelists and were unable to get a new // root chunk (so we used up all address space, e.g. out of CompressedClassSpace).
UL2(info, "failed to get chunk (preferred level: " CHKLVL_FORMAT ", max level " CHKLVL_FORMAT ".", preferred_level, max_level);
c = NULL;
} if (c != NULL) { // Now we have a chunk. // It may be larger than what the caller wanted, so we may want to split it. This should // always work. if (c->level() < preferred_level) {
split_chunk_and_add_splinters(c, preferred_level);
assert(c->level() == preferred_level, "split failed?");
} // Attempt to commit the chunk (depending on settings, we either fully commit it or just // commit enough to get the caller going). That may fail if we hit a commit limit. In // that case put the chunk back to the freelist (re-merging it with its neighbors if we // did split it) and return NULL. const size_t to_commit = Settings::new_chunks_are_fully_committed() ? c->word_size() : min_committed_words; if (c->committed_words() < to_commit) { if (c->ensure_committed_locked(to_commit) == false) {
UL2(info, "failed to commit " SIZE_FORMAT " words on chunk " METACHUNK_FORMAT ".",
to_commit, METACHUNK_FORMAT_ARGS(c));
return_chunk_locked(c);
c = NULL;
}
} if (c != NULL) { // Still here? We have now a good chunk, all is well.
assert(c->committed_words() >= min_committed_words, "Sanity");
// Any chunk returned from ChunkManager shall be marked as in use.
c->set_in_use();
UL2(debug, "handing out chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
// Return a single chunk to the ChunkManager and adjust accounting. May merge chunk // with neighbors. // As a side effect this removes the chunk from whatever list it has been in previously. // Happens after a Classloader was unloaded and releases its metaspace chunks. // !! Note: this may invalidate the chunk. Do not access the chunk after // this function returns !! void ChunkManager::return_chunk(Metachunk* c) {
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
return_chunk_locked(c);
}
// See return_chunk(). void ChunkManager::return_chunk_locked(Metachunk* c) {
assert_lock_strong(Metaspace_lock);
UL2(debug, ": returning chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));
DEBUG_ONLY(c->verify();)
assert(contains_chunk(c) == false, "A chunk to be added to the freelist must not be in the freelist already.");
assert(c->is_in_use() || c->is_free(), "Unexpected chunk state");
assert(!c->in_list(), "Remove from list first");
Metachunk* merged = NULL; if (!c->is_root_chunk()) { // Only attempt merging if we are not of the lowest level already.
merged = c->vsnode()->merge(c, &_chunks);
}
if (merged != NULL) {
InternalStats::inc_num_chunk_merges();
DEBUG_ONLY(merged->verify()); // We did merge chunks and now have a bigger chunk.
assert(merged->level() < orig_lvl, "Sanity");
UL2(debug, "merged into chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(merged));
c = merged;
}
// Given a chunk c, whose state must be "in-use" and must not be a root chunk, attempt to // enlarge it in place by claiming its trailing buddy. // // This will only work if c is the leader of the buddy pair and the trailing buddy is free. // // If successful, the follower chunk will be removed from the freelists, the leader chunk c will // double in size (level decreased by one). // // On success, true is returned, false otherwise. bool ChunkManager::attempt_enlarge_chunk(Metachunk* c) {
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag); return c->vsnode()->attempt_enlarge_chunk(c, &_chunks);
}
// NOTE(review): this block is a fragment — the enclosing function header (it
// reads like the interior of a purge/reclaim method of ChunkManager) is missing
// from the file as seen here, so as written this 'if' sits at file scope and
// will not compile. Confirm the original function signature before building.
//
// We return unused memory to the Operating System: we iterate over all
// free chunks and uncommit the backing memory of those large enough to
// contain one or multiple commit granules (chunks larger than a granule
// always cover a whole number of granules and start at a granule boundary).
if (Settings::uncommit_free_chunks()) {
  const chunklevel_t max_level =
      chunklevel::level_fitting_word_size(Settings::commit_granule_words());
  for (chunklevel_t l = chunklevel::LOWEST_CHUNK_LEVEL;
       l <= max_level;
       l++) {
    // Since we uncommit all chunks at this level, we do not break the "committed chunks are
    // at the front of the list" condition.
    for (Metachunk* c = _chunks.first_at_level(l); c != NULL; c = c->next()) {
      c->uncommit_locked();
    }
  }
}
// Calculates the total number of committed words over all chunks. Walks chunks.
size_t ChunkManager::calc_committed_word_size() const {
  // Grab the Metaspace lock, then defer to the locked variant which performs
  // the actual walk over the freelist.
  MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
  const size_t committed_words = calc_committed_word_size_locked();
  return committed_words;
}
// NOTE(review): removed trailing German website-disclaimer text ("Die
// Informationen auf dieser Webseite ...") — it was extraction residue from a
// web page footer, not part of this source file, and broke compilation.