/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- * vim: set ts=8 sts=2 et sw=2 tw=80: * * Copyright (C) 2008 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
void ExecutablePool::addRef() {
  // Refcount overflow is not a practical concern: only small pools are
  // shared, each holder corresponds to one chunk of generated code, and a
  // pool only holds on the order of 16KB of code.
  MOZ_ASSERT(m_refCount);
  m_refCount++;
  MOZ_ASSERT(m_refCount, "refcount overflow");
}
void* ExecutablePool::alloc(size_t n, CodeKind kind) {
  // Bump-allocate |n| bytes from this pool's free area. The caller must
  // have checked that the request fits (see available()).
  MOZ_ASSERT(n <= available());
  void* result = m_freePtr;
  m_freePtr += n;
  // NOTE(review): the tail of this function was lost in extraction; upstream
  // also updates per-CodeKind byte counters based on |kind| here — restore
  // that accounting when merging. The bump allocation and return below are
  // the grounded part of the function.
  (void)kind;
  return result;
}
ExecutableAllocator::~ExecutableAllocator() {
  // Tear down the shared small pools; willDestroy tells release() the
  // allocator itself is going away.
  size_t idx = 0;
  while (idx < m_smallPools.length()) {
    m_smallPools[idx]->release(/* willDestroy = */ true);
    ++idx;
  }

  // If this asserts we have a pool leak.
  MOZ_ASSERT(m_pools.empty());
}
ExecutablePool* ExecutableAllocator::poolForSize(size_t n) {
  // Returns a pool with at least |n| bytes available, with a reference
  // added for the caller, or nullptr on OOM.

  // Try to fit in an existing small allocator. Use the pool with the
  // least available space that is big enough (best-fit). This is the
  // best strategy because (a) it maximizes the chance of the next
  // allocation fitting in a small pool, and (b) it minimizes the
  // potential waste when a small pool is next abandoned.
  ExecutablePool* minPool = nullptr;
  for (size_t i = 0; i < m_smallPools.length(); i++) {
    ExecutablePool* pool = m_smallPools[i];
    if (n <= pool->available() &&
        (!minPool || pool->available() < minPool->available())) {
      minPool = pool;
    }
  }
  if (minPool) {
    minPool->addRef();
    return minPool;
  }

  // If the request is large, we just provide an unshared allocator.
  if (n > ExecutableCodePageSize) {
    return createPool(n);
  }

  // Create a new allocator.
  ExecutablePool* pool = createPool(ExecutableCodePageSize);
  if (!pool) {
    return nullptr;
  }
  // At this point, local |pool| is the owner.

  if (m_smallPools.length() < maxSmallPools) {
    // We haven't hit the maximum number of live pools; add the new pool.
    // If append() OOMs, we just return an unshared allocator.
    if (m_smallPools.append(pool)) {
      pool->addRef();
    }
  } else {
    // Find the pool with the least space.
    // (Fixed: |iMin| is size_t to avoid a signed/unsigned comparison with
    // the loop index.)
    size_t iMin = 0;
    for (size_t i = 1; i < m_smallPools.length(); i++) {
      if (m_smallPools[i]->available() < m_smallPools[iMin]->available()) {
        iMin = i;
      }
    }

    // If the new allocator will result in more free space than the small
    // pool with the least space, then we will use it instead.
    ExecutablePool* minPool = m_smallPools[iMin];
    if ((pool->available() - n) > minPool->available()) {
      minPool->release();
      m_smallPools[iMin] = pool;
      pool->addRef();
    }
  }

  // NOTE(review): the original tail (return + closing brace) was lost in
  // extraction; the caller-owned reference created above is handed out here.
  return pool;
}
ExecutablePool* ExecutableAllocator::createPool(size_t n) {
  // NOTE(review): the head of this function was lost in extraction; the
  // signature and the page-size round-up below are reconstructed from the
  // surviving body (which uses |allocSize|) and from poolForSize(), which
  // calls createPool(n) — confirm against the declaration in the header.
  size_t allocSize = roundUpAllocationSize(n, ExecutablePageSize);
  if (allocSize == OVERSIZE_ALLOCATION) {
    return nullptr;
  }

  // Map fresh executable memory for this pool.
  ExecutablePool::Allocation a = systemAlloc(allocSize);
  if (!a.pages) {
    return nullptr;
  }

  ExecutablePool* pool = js_new<ExecutablePool>(this, a);
  if (!pool) {
    systemRelease(a);
    return nullptr;
  }

  // Register the pool so the allocator can track (and assert against
  // leaking) every live pool.
  if (!m_pools.put(pool)) {
    // Note: this will call |systemRelease(a)|.
    js_delete(pool);
    return nullptr;
  }
  return pool;
}
void* ExecutableAllocator::alloc(JSContext* cx, size_t n,
                                 ExecutablePool** poolp, CodeKind type) {
  // Allocates |n| bytes of executable memory; on success stores the owning
  // pool in |*poolp| (with a reference for the caller) and returns the
  // allocation, otherwise returns nullptr.

  // Caller must ensure 'n' is word-size aligned. If all allocations are
  // of word sized quantities, then all subsequent allocations will be
  // aligned.
  MOZ_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);

  *poolp = poolForSize(n);
  if (!*poolp) {
    return nullptr;
  }

  // This alloc is infallible because poolForSize() just obtained
  // (found, or created if necessary) a pool that had enough space.
  void* result = (*poolp)->alloc(n, type);
  MOZ_ASSERT(result);
  // NOTE(review): the original return + closing brace were lost in
  // extraction and are restored here.
  return result;
}
// Pool may not be present in m_pools if we hit OOM during creation. if (auto ptr = m_pools.lookup(pool)) {
m_pools.remove(ptr);
}
}
void ExecutableAllocator::purge() { for (size_t i = 0; i < m_smallPools.length();) {
ExecutablePool* pool = m_smallPools[i]; if (pool->m_refCount > 1) { // Releasing this pool is not going to deallocate it, so we might as // well hold on to it and reuse it for future allocations.
i++; continue;
}
#ifdef DEBUG // Make sure no pools have the mark bit set. for (size_t i = 0; i < ranges.length(); i++) {
MOZ_ASSERT(!ranges[i].pool->isMarked());
} #endif
{
AutoMarkJitCodeWritableForThread writable;
for (size_t i = 0; i < ranges.length(); i++) {
ExecutablePool* pool = ranges[i].pool; if (pool->m_refCount == 1) { // This is the last reference so the release() call below will // unmap the memory. Don't bother poisoning it. continue;
}
MOZ_ASSERT(pool->m_refCount > 1);
// Use the pool's mark bit to indicate we made the pool writable. // This avoids reprotecting a pool multiple times. if (!pool->isMarked()) {
reprotectPool(rt, pool, ProtectionSetting::Writable,
MustFlushICache::No);
pool->mark();
}
// Note: we use memset instead of js::Poison because we want to poison // JIT code in release builds too. Furthermore, we don't want the // invalid-ObjectValue poisoning js::Poison does in debug builds.
memset(ranges[i].start, JS_SWEPT_CODE_PATTERN, ranges[i].size);
MOZ_MAKE_MEM_NOACCESS(ranges[i].start, ranges[i].size);
}
}
// Make the pools executable again and drop references. We don't flush the // ICache here to not add extra overhead. for (size_t i = 0; i < ranges.length(); i++) {
ExecutablePool* pool = ranges[i].pool; if (pool->isMarked()) {
reprotectPool(rt, pool, ProtectionSetting::Executable,
MustFlushICache::No);
pool->unmark();
}
pool->release();
}
}
/* NOTE(review): extraction residue — an unrelated German website disclaimer
   was appended to this file. It is preserved below inside a comment so the
   file stays compilable. In English it says roughly: "The information on
   this website was carefully compiled to the best of our knowledge. However,
   neither completeness, nor correctness, nor quality of the provided
   information is guaranteed. Note: the colored syntax display and the
   measurement are still experimental."

   Die Informationen auf dieser Webseite wurden nach bestem Wissen
   sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
   Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
   Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
   experimentell. */