/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- * vim: set ts=8 sts=2 et sw=2 tw=80:
*/ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <cstdint>
#include <cstring>

#if defined(XP_WIN)
#  include "util/WindowsWrapper.h"
#  include <psapi.h>
#elif defined(__wasi__)
// Nothing.
#else
#  include <algorithm>
#  include <errno.h>
#  include <sys/mman.h>
#  include <sys/resource.h>
#  include <sys/stat.h>
#  include <sys/types.h>
#  include <unistd.h>
#endif
BEGIN_TEST(testGCAllocator) { #ifdef JS_64BIT // If we're using the scattershot allocator, this test does not apply. if (js::gc::UsingScattershotAllocator()) { returntrue;
} #endif
size_t PageSize = js::gc::SystemPageSize();
/* Finish any ongoing background free activity. */
js::gc::FinishGC(cx);
bool addressesGrowUp(bool* resultOut) { /* * Try to detect whether the OS allocates memory in increasing or decreasing * address order by making several allocations and comparing the addresses.
*/
bool testGCAllocatorUp(const size_t PageSize) { const size_t UnalignedSize = StagingSize + Alignment - PageSize; void* chunkPool[MaxTempChunks]; // Allocate a contiguous chunk that we can partition for testing. void* stagingArea = mapMemory(UnalignedSize); if (!stagingArea) { returnfalse;
} // Ensure that the staging area is aligned.
unmapPages(stagingArea, UnalignedSize); if (offsetFromAligned(stagingArea)) { const size_t Offset = offsetFromAligned(stagingArea); // Place the area at the lowest aligned address.
stagingArea = (void*)(uintptr_t(stagingArea) + (Alignment - Offset));
}
mapMemoryAt(stagingArea, StagingSize); // Make sure there are no available chunks below the staging area. int tempChunks; if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, false)) { returnfalse;
} // Unmap the staging area so we can set it up for testing.
unmapPages(stagingArea, StagingSize); // Check that the first chunk is used if it is aligned.
CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool,
tempChunks)); // Check that the first chunk is used if it can be aligned.
CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool,
tempChunks)); // Check that an aligned chunk after a single unalignable chunk is used.
CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool,
tempChunks)); // Check that we fall back to the slow path after two unalignable chunks.
CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
tempChunks)); // Check that we also fall back after an unalignable and an alignable chunk.
CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
tempChunks)); // Check that the last ditch allocator works as expected.
CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
tempChunks, UseLastDitchAllocator)); // Check that the last ditch allocator can deal with naturally aligned chunks.
CHECK(positionIsCorrect("x--xx--xoo------", stagingArea, chunkPool,
tempChunks, UseLastDitchAllocator));
bool testGCAllocatorDown(const size_t PageSize) { const size_t UnalignedSize = StagingSize + Alignment - PageSize; void* chunkPool[MaxTempChunks]; // Allocate a contiguous chunk that we can partition for testing. void* stagingArea = mapMemory(UnalignedSize); if (!stagingArea) { returnfalse;
} // Ensure that the staging area is aligned.
unmapPages(stagingArea, UnalignedSize); if (offsetFromAligned(stagingArea)) { void* stagingEnd = (void*)(uintptr_t(stagingArea) + UnalignedSize); const size_t Offset = offsetFromAligned(stagingEnd); // Place the area at the highest aligned address.
stagingArea = (void*)(uintptr_t(stagingEnd) - Offset - StagingSize);
}
mapMemoryAt(stagingArea, StagingSize); // Make sure there are no available chunks above the staging area. int tempChunks; if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true)) { returnfalse;
} // Unmap the staging area so we can set it up for testing.
unmapPages(stagingArea, StagingSize); // Check that the first chunk is used if it is aligned.
CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool,
tempChunks)); // Check that the first chunk is used if it can be aligned.
CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool,
tempChunks)); // Check that an aligned chunk after a single unalignable chunk is used.
CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool,
tempChunks)); // Check that we fall back to the slow path after two unalignable chunks.
CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool,
tempChunks)); // Check that we also fall back after an unalignable and an alignable chunk.
CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool,
tempChunks)); // Check that the last ditch allocator works as expected.
CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool,
tempChunks, UseLastDitchAllocator)); // Check that the last ditch allocator can deal with naturally aligned chunks.
CHECK(positionIsCorrect("------oox--xx--x", stagingArea, chunkPool,
tempChunks, UseLastDitchAllocator));
bool fillSpaceBeforeStagingArea(int& tempChunks, void* stagingArea, void** chunkPool, bool addressesGrowDown) { // Make sure there are no available chunks before the staging area.
tempChunks = 0;
chunkPool[tempChunks++] = mapMemory(2 * Chunk); while (tempChunks < MaxTempChunks && chunkPool[tempChunks - 1] &&
(chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown) {
chunkPool[tempChunks++] = mapMemory(2 * Chunk); if (!chunkPool[tempChunks - 1]) { break; // We already have our staging area, so OOM here is okay.
} if ((chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
addressesGrowDown) { break; // The address growth direction is inconsistent!
}
} // OOM also means success in this case. if (!chunkPool[tempChunks - 1]) {
--tempChunks; returntrue;
} // Bail if we can't guarantee the right address space layout. if ((chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown ||
(tempChunks > 1 &&
(chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
addressesGrowDown)) { while (--tempChunks >= 0) {
unmapPages(chunkPool[tempChunks], 2 * Chunk);
}
unmapPages(stagingArea, StagingSize); returnfalse;
} returntrue;
}
bool positionIsCorrect(constchar* str, void* base, void** chunkPool, int tempChunks,
AllocType allocator = UseNormalAllocator) { // str represents a region of memory, with each character representing a // region of Chunk bytes. str should contain only x, o and -, where // x = mapped by the test to set up the initial conditions, // o = mapped by the GC allocator, and // - = unmapped. // base should point to a region of contiguous free memory // large enough to hold strlen(str) chunks of Chunk bytes. int len = strlen(str); int i; // Find the index of the desired address. for (i = 0; i < len && str[i] != 'o'; ++i); void* desired = (void*)(uintptr_t(base) + i * Chunk); // Map the regions indicated by str. for (i = 0; i < len; ++i) { if (str[i] == 'x') {
mapMemoryAt((void*)(uintptr_t(base) + i * Chunk), Chunk);
}
} // Allocate using the GC's allocator. void* result; if (allocator == UseNormalAllocator) {
result = js::gc::MapAlignedPages(2 * Chunk, Alignment);
} else {
result = js::gc::TestMapAlignedPagesLastDitch(2 * Chunk, Alignment);
} // Clean up the mapped regions. if (result) {
js::gc::UnmapPages(result, 2 * Chunk);
} for (--i; i >= 0; --i) { if (str[i] == 'x') {
js::gc::UnmapPages((void*)(uintptr_t(base) + i * Chunk), Chunk);
}
} // CHECK returns, so clean up on failure. if (result != desired) { while (--tempChunks >= 0) {
js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
}
} return result == desired;
}
holder->setBuffer(alloc); if (nurseryOwned) { // Hack to force minor GC. We've marked our alloc 'nursery owned' even // though that isn't true.
NewPlainObject(cx); // Hack to force marking our holder.
cx->runtime()->gc.storeBuffer().putWholeCell(holder);
}
JS_GC(cx);
// Post GC marking state depends on whether allocation is small or not. // Small allocations will remain marked whereas others will have their // mark state cleared.
BEGIN_TEST(testBufferAllocator_predicatesOnOtherAllocs) { if (!cx->runtime()->gc.nursery().isEnabled()) {
fprintf(stderr, "Skipping test as nursery is disabled.\n");
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.