/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
if (increased > 0) { // Update atomically since we have concurrent readers
Atomic::add(&_capacity, increased);
// Record time of last commit. When allocation, we prefer increasing // the capacity over flushing the cache. That means there could be // expired pages in the cache at this time. However, since we are // increasing the capacity we are obviously in need of committed // memory and should therefore not be uncommitting memory.
_cache.set_last_commit();
}
return increased;
}
// Decreases the committed capacity (_capacity) by the given number of bytes.
//
// size             - number of bytes to subtract from _capacity
// set_max_capacity - when true, also lowers _current_max_capacity to the new
//                    _capacity so no further capacity increases are attempted,
//                    and logs an error reporting the forced reduction
void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
  // Update atomically since we have concurrent readers
  Atomic::sub(&_capacity, size);

  if (set_max_capacity) {
    // Adjust current max capacity to avoid further attempts to increase capacity
    log_error_p(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

    // Update atomically since we have concurrent readers
    Atomic::store(&_current_max_capacity, _capacity);
  }
}
// Increases the used-memory accounting (_used) by the given number of bytes
// and tracks the high watermark (_used_high).
//
// size              - number of bytes to add to _used
// worker_relocation - true when the allocation is made on behalf of worker
//                     relocation, in which case the bytes are subtracted
//                     from _reclaimed
void ZPageAllocator::increase_used(size_t size, bool worker_relocation) {
  if (worker_relocation) {
    // Allocating a page for the purpose of worker relocation has
    // a negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::add(&_used, size);
  if (used > _used_high) {
    // NOTE(review): _used_high is updated with a plain store — presumably it
    // is only consumed at a point where no concurrent updates occur (e.g. a
    // safepoint); confirm against its readers.
    _used_high = used;
  }
}
// Decreases the used-memory accounting (_used) by the given number of bytes
// and tracks the low watermark (_used_low).
//
// size      - number of bytes to subtract from _used
// reclaimed - true when the released bytes should count towards _reclaimed
void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  // Only pages explicitly released with the reclaimed flag set
  // counts as reclaimed bytes. This flag is true when we release
  // a page after relocation, and is false when we release a page
  // to undo an allocation.
  if (reclaimed) {
    _reclaimed += size;
  }

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::sub(&_used, size);
  if (used < _used_low) {
    // NOTE(review): _used_low is updated with a plain store — presumably only
    // consumed where no concurrent updates occur; confirm against its readers.
    _used_low = used;
  }
}
do { // Start asynchronous GC
ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);
// Wait for allocation to complete, fail or request a GC
result = allocation->wait();
} while (result == ZPageAllocationStallStartGC);
{ // // We grab the lock here for two different reasons: // // 1) Guard deletion of underlying semaphore. This is a workaround for // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy // the semaphore immediately after returning from sem_wait(). The // reason is that sem_post() can touch the semaphore after a waiting // thread have returned from sem_wait(). To avoid this race we are // forcing the waiting thread to acquire/release the lock held by the // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674 // // 2) Guard the list of satisfied pages. //
ZLocker<ZLock> locker(&_lock);
_satisfied.remove(allocation);
}
// Allocate virtual memory. To make error handling a lot more straight // forward, we allocate virtual memory before destroying flushed pages. // Flushed pages are also unmapped and destroyed asynchronously, so we // can't immediately reuse that part of the address space anyway. const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address()); if (vmem.is_null()) {
log_error(gc)("Out of address space"); return NULL;
}
ZPhysicalMemory pmem;
size_t flushed = 0;
// Harvest physical memory from flushed pages
ZListRemoveIterator<ZPage> iter(allocation->pages()); for (ZPage* page; iter.next(&page);) {
flushed += page->size();
// Allocate any remaining physical memory. Capacity and used has // already been adjusted, we just need to fetch the memory, which // is guaranteed to succeed. if (flushed < size) { const size_t remaining = size - flushed;
allocation->set_committed(remaining);
_physical.alloc(pmem, remaining);
}
// Create new page returnnew ZPage(allocation->type(), vmem, pmem);
}
bool ZPageAllocator::should_defragment(const ZPage* page) const { // A small page can end up at a high address (second half of the address space) // if we've split a larger page or we have a constrained address space. To help // fight address space fragmentation we remap such pages to a lower address, if // a lower address is available. return page->type() == ZPageTypeSmall &&
page->start() >= _virtual.reserved() / 2 &&
page->start() > _virtual.lowest_available_address();
}
// Returns true if the allocation request is already fully satisfied by the
// pages harvested so far, i.e. no page creation/remapping work remains.
//
// The allocation is immediately satisfied if the list of pages contains
// exactly one page, with the type and size that was requested. However,
// even if the allocation is immediately satisfied we might still want to
// return false here to force the page to be remapped to fight address
// space fragmentation.
//
// Fix: the original had fused tokens ("returnfalse;" / "returntrue;") which
// do not compile; restored to "return false;" / "return true;".
bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
  if (allocation->pages()->size() != 1) {
    // Not a single page
    return false;
  }

  const ZPage* const page = allocation->pages()->first();
  if (page->type() != allocation->type() ||
      page->size() != allocation->size()) {
    // Wrong type or size
    return false;
  }

  if (should_defragment(page)) {
    // Defragment address space
    ZStatInc(ZCounterDefragment);
    return false;
  }

  // Allocation immediately satisfied
  return true;
}
ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) { // Fast path if (is_alloc_satisfied(allocation)) { return allocation->pages()->remove_first();
}
// Slow path
ZPage* const page = alloc_page_create(allocation); if (page == NULL) { // Out of address space return NULL;
}
// Failed or partially failed. Split of any successfully committed // part of the page into a new page and insert it into list of pages, // so that it will be re-inserted into the page cache.
ZPage* const committed_page = page->split_committed();
destroy_page(page);
if (committed_page != NULL) {
map_page(committed_page);
allocation->pages()->insert_last(committed_page);
}
// Allocate one or more pages from the page cache. If the allocation // succeeds but the returned pages don't cover the complete allocation, // then finalize phase is allowed to allocate the remaining memory // directly from the physical memory manager. Note that this call might // block in a safepoint if the non-blocking flag is not set. if (!alloc_page_or_stall(&allocation)) { // Out of memory return NULL;
}
ZPage* const page = alloc_page_finalize(&allocation); if (page == NULL) { // Failed to commit or map. Clean up and retry, in the hope that // we can still allocate by flushing the page cache (more aggressively).
alloc_page_failed(&allocation); goto retry;
}
// Reset page. This updates the page's sequence number and must // be done after we potentially blocked in a safepoint (stalled) // where the global sequence number was updated.
page->reset();
// Update allocation statistics. Exclude worker relocations to avoid // artificial inflation of the allocation rate during relocation. if (!flags.worker_relocation() && is_init_completed()) { // Note that there are two allocation rate counters, which have // different purposes and are sampled at different frequencies. const size_t bytes = page->size();
ZStatInc(ZCounterAllocationRate, bytes);
ZStatInc(ZStatAllocRate::counter(), bytes);
}
void ZPageAllocator::satisfy_stalled() { for (;;) {
ZPageAllocation* const allocation = _stalled.first(); if (allocation == NULL) { // Allocation queue is empty return;
}
if (!alloc_page_common(allocation)) { // Allocation could not be satisfied, give up return;
}
// Allocation succeeded, dequeue and satisfy allocation request. // Note that we must dequeue the allocation request first, since // it will immediately be deallocated once it has been satisfied.
_stalled.remove(allocation);
_satisfied.insert_last(allocation);
allocation->satisfy(ZPageAllocationStallSuccess);
}
}
// NOTE(review): This block appears to be corrupted (likely by whatever
// produced this file): the function is declared to return size_t, but both
// exits below are bare "return;" statements, which will not compile. The
// locals declared up front (joiner, pages, flushed, retain/release/limit/
// flush) are never used in the visible body, while the loop over _stalled
// looks like it belongs to a different member function that fails or restarts
// stalled allocation requests. Reconstruct this function from the original
// source before relying on it; the code below is left byte-identical.
size_t ZPageAllocator::uncommit(uint64_t* timeout) {
  // We need to join the suspendible thread set while manipulating capacity and
  // used, to make sure GC safepoints will have a consistent view. However, when
  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
  // we don't change the address good mask after pages have been flushed, and
  // thereby made invisible to pages_do(), but before they have been unmapped.
  SuspendibleThreadSetJoiner joiner(ZVerifyViews);
  ZList<ZPage> pages;
  size_t flushed;

  // Never uncommit below min capacity. We flush out and uncommit chunks at
  // a time (~0.8% of the max capacity, but at least one granule and at most
  // 256M), in case demand for memory increases while we are uncommitting.
  const size_t retain = MAX2(_used, _min_capacity);
  const size_t release = _capacity - retain;
  const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
  const size_t flush = MIN2(release, limit);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
    if (allocation->seqnum() == ZGlobalSeqNum) {
      // Start a new GC cycle, keep allocation requests enqueued
      allocation->satisfy(ZPageAllocationStallStartGC);
      return;
    }

    // Out of memory, fail allocation request
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallFailed);
  }
}
/*
 * NOTE(review): The text below is extraneous trailing content (a German
 * web-page disclaimer) that is not part of this source file. It has been
 * wrapped in a comment so it no longer breaks compilation; remove it when
 * restoring the file from the original source.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */