/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// For small (less than half the page size) allocations, test every position // within many possible sizes.
size_t small_max =
stats.subpage_max ? stats.subpage_max : stats.quantum_wide_max; for (size_t n = 0; n <= small_max; n += 8) { auto p = (char*)moz_arena_malloc(arenaId, n);
size_t usable = moz_malloc_size_of(p);
ASSERT_TRUE(small.append(p)); for (size_t j = 0; j < usable; j++) {
jemalloc_ptr_info(&p[j], &info);
ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
}
}
// Similar for large (small_max + 1 KiB .. 1MiB - 8KiB) allocations. for (size_t n = small_max + 1_KiB; n <= stats.large_max; n += 1_KiB) { auto p = (char*)moz_arena_malloc(arenaId, n);
size_t usable = moz_malloc_size_of(p);
ASSERT_TRUE(large.append(p)); for (size_t j = 0; j < usable; j += 347) {
jemalloc_ptr_info(&p[j], &info);
ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
}
}
// Similar for huge (> 1MiB - 8KiB) allocations. for (size_t n = stats.chunksize; n <= 10_MiB; n += 512_KiB) { auto p = (char*)moz_arena_malloc(arenaId, n);
size_t usable = moz_malloc_size_of(p);
ASSERT_TRUE(huge.append(p)); for (size_t j = 0; j < usable; j += 567) {
jemalloc_ptr_info(&p[j], &info);
ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
}
}
// The following loops check freed allocations. We step through the vectors // using prime-sized steps, which gives full coverage of the arrays while // avoiding deallocating in the same order we allocated.
size_t len;
// Free the small allocations and recheck them.
int isFreedAlloc = 0, isFreedPage = 0;
len = small.length();
for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
  char* p = small[j];
  size_t usable = moz_malloc_size_of(p);
  free(p);
  for (size_t k = 0; k < usable; k++) {
    jemalloc_ptr_info(&p[k], &info);
    // There are two valid outcomes here: the slot may still be tracked as a
    // freed allocation, or its page may already have been returned to the
    // freed-page state.
    if (InfoEq(info, TagFreedAlloc, p, usable, arenaId)) {
      isFreedAlloc++;
    } else if (InfoEqFreedPage(info, &p[k], stats.page_size, arenaId)) {
      isFreedPage++;
    } else {
      ASSERT_TRUE(false);
    }
  }
}
// There should be both FreedAlloc and FreedPage results, but a lot more of
// the former.
ASSERT_TRUE(isFreedAlloc != 0);
ASSERT_TRUE(isFreedPage != 0);
ASSERT_TRUE(isFreedAlloc / isFreedPage > 8);
// Free the large allocations and recheck them.
len = large.length(); for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) { char* p = large[j];
size_t usable = moz_malloc_size_of(p);
free(p); for (size_t k = 0; k < usable; k += 357) {
jemalloc_ptr_info(&p[k], &info);
ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size, arenaId));
}
}
// Free the huge allocations and recheck them.
len = huge.length(); for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) { char* p = huge[j];
size_t usable = moz_malloc_size_of(p);
free(p); for (size_t k = 0; k < usable; k += 587) {
jemalloc_ptr_info(&p[k], &info);
ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
}
}
// Chunk header.
UniquePtr<int> p = MakeUnique<int>();
size_t chunksizeMask = stats.chunksize - 1; char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
size_t chunkHeaderSize = stats.chunksize - stats.large_max - stats.page_size; for (size_t i = 0; i < chunkHeaderSize; i += 64) {
jemalloc_ptr_info(&chunk[i], &info);
ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
}
// Run header.
size_t page_sizeMask = stats.page_size - 1; char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask); for (size_t i = 0; i < 4 * sizeof(void*); i++) {
jemalloc_ptr_info(&run[i], &info);
ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
}
// Entire chunk. It's impossible to check what is put into |info| for all of // these addresses; this is more about checking that we don't crash. for (size_t i = 0; i < stats.chunksize; i += 256) {
jemalloc_ptr_info(&chunk[i], &info);
}
TEST(Jemalloc, Arenas)
{
arena_id_t arena = moz_create_arena();
ASSERT_TRUE(arena != 0); void* ptr = moz_arena_malloc(arena, 42);
ASSERT_TRUE(ptr != nullptr);
ptr = moz_arena_realloc(arena, ptr, 64);
ASSERT_TRUE(ptr != nullptr);
moz_arena_free(arena, ptr);
ptr = moz_arena_calloc(arena, 24, 2); // For convenience, free can be used to free arena pointers.
free(ptr);
moz_dispose_arena(arena);
// Avoid death tests adding some unnecessary (long) delays.
SAVE_GDB_SLEEP_LOCAL();
// Can't use an arena after it's disposed. // ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 80), "");
// Arena id 0 can't be used to somehow get to the main arena.
ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");
arena = moz_create_arena();
arena_id_t arena2 = moz_create_arena(); // Ensure arena2 is used to prevent OSX errors:
(void)arena2;
// For convenience, realloc can also be used to reallocate arena pointers. // The result should be in the same arena. Test various size class // transitions. for (size_t from_size : sSizes) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size); for (size_t to_size : sSizes) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
ptr = moz_arena_malloc(arena, from_size);
ptr = realloc(ptr, to_size); // Freeing with the wrong arena should crash.
ASSERT_DEATH_WRAP(moz_arena_free(arena2, ptr), ""); // Likewise for moz_arena_realloc.
ASSERT_DEATH_WRAP(moz_arena_realloc(arena2, ptr, from_size), ""); // The following will crash if it's not in the right arena.
moz_arena_free(arena, ptr);
}
}
// Check that a buffer aPtr is entirely filled with a given character from // aOffset to aSize. For faster comparison, the caller is required to fill a // reference buffer with the wanted character, and give the size of that // reference buffer. staticvoid bulk_compare(char* aPtr, size_t aOffset, size_t aSize, char* aReference, size_t aReferenceSize) { for (size_t i = aOffset; i < aSize; i += aReferenceSize) {
size_t length = std::min(aSize - i, aReferenceSize); if (memcmp(aPtr + i, aReference, length)) { // We got a mismatch, we now want to report more precisely where. for (size_t j = i; j < i + length; j++) {
ASSERT_EQ(aPtr[j], *aReference);
}
}
}
}
// A range iterator for size classes between two given values. class SizeClassesBetween { public:
SizeClassesBetween(size_t aStart, size_t aEnd) : mStart(aStart), mEnd(aEnd) {}
class Iterator { public: explicit Iterator(size_t aValue) : mValue(malloc_good_size(aValue)) {}
staticbool CanReallocInPlace(size_t aFromSize, size_t aToSize,
jemalloc_stats_t& aStats) { // PHC allocations must be disabled because PHC reallocs differently to // mozjemalloc. #ifdef MOZ_PHC
MOZ_RELEASE_ASSERT(!mozilla::phc::IsPHCEnabledOnCurrentThread()); #endif
if (aFromSize == malloc_good_size(aToSize)) { // Same size class: in-place. returntrue;
} if (aFromSize >= aStats.page_size && aFromSize <= aStats.large_max &&
aToSize >= aStats.page_size && aToSize <= aStats.large_max) { // Any large class to any large class: in-place when there is space to. returntrue;
} if (IsSameRoundedHugeClass(aFromSize, aToSize, aStats)) { // Huge sizes that round up to the same multiple of the chunk size: // in-place. returntrue;
} returnfalse;
}
TEST(Jemalloc, InPlace)
{ // Disable PHC allocations for this test, because CanReallocInPlace() isn't // valid for PHC allocations.
AutoDisablePHCOnCurrentThread disable;
jemalloc_stats_t stats;
jemalloc_stats(&stats);
// Using a separate arena, which is always emptied after an iteration, ensures // that in-place reallocation happens in all cases it can happen. This test is // intended for developers to notice they may have to adapt other tests if // they change the conditions for in-place reallocation.
arena_id_t arena = moz_create_arena();
// Bug 1474254: disable this test for windows ccov builds because it leads to // timeout. #if !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
TEST(Jemalloc, JunkPoison)
{ // Disable PHC allocations for this test, because CanReallocInPlace() isn't // valid for PHC allocations, and the testing UAFs aren't valid.
AutoDisablePHCOnCurrentThread disable;
jemalloc_stats_t stats;
jemalloc_stats(&stats);
// Avoid death tests adding some unnecessary (long) delays.
SAVE_GDB_SLEEP_LOCAL();
// Create buffers in a separate arena, for faster comparisons with // bulk_compare.
arena_id_t buf_arena = moz_create_arena(); char* junk_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size); // Depending on its configuration, the allocator will either fill the // requested allocation with the junk byte (0xe4) or with zeroes, or do // nothing, in which case, since we're allocating in a fresh arena, // we'll be getting zeroes. char junk = stats.opt_junk ? '\xe4' : '\0'; for (size_t i = 0; i < stats.page_size; i++) {
ASSERT_EQ(junk_buf[i], junk);
}
arena_params_t params;
// Allow as many dirty pages in the arena as possible, so that purge never
// happens in it. Purge breaks some of the tests below randomly depending on
// what other things happen on other threads.
params.mMaxDirty = size_t(-1);
// NOTE: the address-of operator here had been mis-encoded as "¶ms"
// (U+00B6 + "ms"); restored to `&params`.
arena_id_t arena = moz_create_arena_with_params(&params);
// Mozjemalloc is configured to only poison the first four cache lines. const size_t poison_check_len = 256;
// Allocating should junk the buffer, and freeing should poison the buffer. for (size_t size : sSizes) { if (size <= stats.large_max) {
SCOPED_TRACE(testing::Message() << "size = " << size); char* buf = (char*)moz_arena_malloc(arena, size);
size_t allocated = moz_malloc_usable_size(buf); if (stats.opt_junk || stats.opt_zero) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(buf, 0, allocated, junk_buf, stats.page_size));
}
moz_arena_free(arena, buf); // We purposefully do a use-after-free here, to check that the data was // poisoned.
ASSERT_NO_FATAL_FAILURE(
bulk_compare(buf, 0, std::min(allocated, poison_check_len),
poison_buf, stats.page_size));
}
}
// Shrinking in the same size class should be in place and poison between the // new allocation size and the old one.
size_t prev = 0; for (size_t size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "size = " << size);
SCOPED_TRACE(testing::Message() << "prev = " << prev); char* ptr = (char*)moz_arena_malloc(arena, size);
memset(ptr, fill, moz_malloc_usable_size(ptr)); char* ptr2 = (char*)moz_arena_realloc(arena, ptr, prev + 1);
ASSERT_EQ(ptr, ptr2);
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, prev + 1, fill_buf, stats.page_size));
ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, prev + 1,
std::min(size, poison_check_len),
poison_buf, stats.page_size));
moz_arena_free(arena, ptr);
prev = size;
}
// In-place realloc should junk the new bytes when growing and poison the old // bytes when shrinking. for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size); for (size_t to_size : sSizes) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size); if (CanReallocInPlace(from_size, to_size, stats)) { char* ptr = (char*)moz_arena_malloc(arena, from_size);
memset(ptr, fill, moz_malloc_usable_size(ptr)); char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
ASSERT_EQ(ptr, ptr2); // Shrinking allocation if (from_size >= to_size) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, to_size, fill_buf, stats.page_size)); // Huge allocations have guards and will crash when accessing // beyond the valid range. if (to_size > stats.large_max) {
size_t page_limit = ALIGNMENT_CEILING(to_size, stats.page_size);
ASSERT_NO_FATAL_FAILURE(bulk_compare(
ptr, to_size, std::min(page_limit, poison_check_len),
poison_buf, stats.page_size));
ASSERT_DEATH_WRAP(ptr[page_limit] = 0, "");
} else {
ASSERT_NO_FATAL_FAILURE(bulk_compare(
ptr, to_size, std::min(from_size, poison_check_len), poison_buf,
stats.page_size));
}
} else { // Enlarging allocation
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, from_size, fill_buf, stats.page_size)); if (stats.opt_junk || stats.opt_zero) {
ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, from_size, to_size,
junk_buf, stats.page_size));
} // Huge allocation, so should have a guard page following if (to_size > stats.large_max) {
ASSERT_DEATH_WRAP(
ptr[ALIGNMENT_CEILING(to_size, stats.page_size)] = 0, "");
}
}
moz_arena_free(arena, ptr2);
}
}
}
// Growing to a different size class should poison the old allocation, // preserve the original bytes, and junk the new bytes in the new allocation. for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size); for (size_t to_size : sSizes) { if (from_size < to_size && malloc_good_size(to_size) != from_size &&
!IsSameRoundedHugeClass(from_size, to_size, stats)) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size); char* ptr = (char*)moz_arena_malloc(arena, from_size);
memset(ptr, fill, moz_malloc_usable_size(ptr)); // Avoid in-place realloc by allocating a buffer, expecting it to be // right after the buffer we just received. Buffers smaller than the // page size and exactly or larger than the size of the largest large // size class can't be reallocated in-place. char* avoid_inplace = nullptr; if (from_size >= stats.page_size && from_size < stats.large_max) {
avoid_inplace = (char*)moz_arena_malloc(arena, stats.page_size);
ASSERT_EQ(ptr + from_size, avoid_inplace);
} char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
ASSERT_NE(ptr, ptr2); if (from_size <= stats.large_max) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, std::min(from_size, poison_check_len),
poison_buf, stats.page_size));
}
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr2, 0, from_size, fill_buf, stats.page_size)); if (stats.opt_junk || stats.opt_zero) {
size_t rounded_to_size = malloc_good_size(to_size);
ASSERT_NE(to_size, rounded_to_size);
ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
junk_buf, stats.page_size));
}
moz_arena_free(arena, ptr2);
moz_arena_free(arena, avoid_inplace);
}
}
}
// Shrinking to a different size class should poison the old allocation, // preserve the original bytes, and junk the extra bytes in the new // allocation. for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
SCOPED_TRACE(testing::Message() << "from_size = " << from_size); for (size_t to_size : sSizes) { if (from_size > to_size &&
!CanReallocInPlace(from_size, to_size, stats)) {
SCOPED_TRACE(testing::Message() << "to_size = " << to_size); char* ptr = (char*)moz_arena_malloc(arena, from_size);
memset(ptr, fill, from_size); char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
ASSERT_NE(ptr, ptr2); if (from_size <= stats.large_max) {
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr, 0, std::min(from_size, poison_check_len),
poison_buf, stats.page_size));
}
ASSERT_NO_FATAL_FAILURE(
bulk_compare(ptr2, 0, to_size, fill_buf, stats.page_size)); if (stats.opt_junk || stats.opt_zero) {
size_t rounded_to_size = malloc_good_size(to_size);
ASSERT_NE(to_size, rounded_to_size);
ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
junk_buf, stats.page_size));
}
moz_arena_free(arena, ptr2);
}
}
}
TEST(Jemalloc, TrailingGuard)
{ // Disable PHC allocations for this test, because even a single PHC // allocation occurring can throw it off.
AutoDisablePHCOnCurrentThread disable;
jemalloc_stats_t stats;
jemalloc_stats(&stats);
// Avoid death tests adding some unnecessary (long) delays.
SAVE_GDB_SLEEP_LOCAL();
// Do enough large allocations to fill a chunk, and then one additional one, // and check that the guard page is still present after the one-but-last // allocation, i.e. that we didn't allocate the guard.
Vector<void*> ptr_list; for (size_t cnt = 0; cnt < stats.large_max / stats.page_size; cnt++) { void* ptr = moz_arena_malloc(arena, stats.page_size);
ASSERT_TRUE(ptr != nullptr);
ASSERT_TRUE(ptr_list.append(ptr));
}
TEST(Jemalloc, LeadingGuard)
{ // Disable PHC allocations for this test, because even a single PHC // allocation occurring can throw it off.
AutoDisablePHCOnCurrentThread disable;
jemalloc_stats_t stats;
jemalloc_stats(&stats);
// Avoid death tests adding some unnecessary (long) delays.
SAVE_GDB_SLEEP_LOCAL();
// Do a simple normal allocation, but force all the allocation space // in the chunk to be used up. This allows us to check that we get // the safe area right in the logic that follows (all memory will be // committed and initialized), and it forces this pointer to the start // of the zone to sit at the very start of the usable chunk area. void* ptr = moz_arena_malloc(arena, stats.large_max);
ASSERT_TRUE(ptr != nullptr); // If ptr is chunk-aligned, the above allocation went wrong. void* chunk_start = (void*)ALIGNMENT_FLOOR((uintptr_t)ptr, stats.chunksize);
ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start); // If ptr is 1 page after the chunk start (so right after the header), // we must have missed adding the guard page.
ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start + stats.page_size); // The actual start depends on the amount of metadata versus the page // size, so we can't check equality without pulling in too many // implementation details.
// Guard page should be right before data area void* guard_page = (void*)(((uintptr_t)ptr) - sizeof(void*));
jemalloc_ptr_info_t info;
jemalloc_ptr_info(guard_page, &info);
ASSERT_TRUE(info.tag == TagUnknown);
ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");
// Avoid death tests adding some unnecessary (long) delays.
SAVE_GDB_SLEEP_LOCAL();
arena_id_t arena = moz_create_arena(); void* ptr = moz_arena_malloc(arena, 42); // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
moz_arena_free(arena, ptr);
moz_dispose_arena(arena);
arena = moz_create_arena();
ptr = moz_arena_malloc(arena, stats.page_size * 2); // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
moz_arena_free(arena, ptr);
moz_dispose_arena(arena);
arena = moz_create_arena();
ptr = moz_arena_malloc(arena, stats.chunksize * 2); #ifdef MOZ_DEBUG // On debug builds, we do the expensive check that arenas are empty.
ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
moz_arena_free(arena, ptr);
moz_dispose_arena(arena); #else // Currently, the allocator can't trivially check whether the arena is empty // of huge allocations, so disposing of it works.
moz_dispose_arena(arena); // But trying to free a pointer that belongs to it will MOZ_CRASH.
ASSERT_DEATH_WRAP(free(ptr), ""); // Likewise for realloc
ASSERT_DEATH_WRAP(ptr = realloc(ptr, stats.chunksize * 3), ""); #endif
// Using the arena after it's been disposed of is MOZ_CRASH-worthy.
ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 42), "");
|| baseline.num_operations + num_ops != stats.num_operations)) { // All the tests that check stats, perform some operation, then check stats // again can race with other threads. But the test can't be made thread // safe without a significant amount of work. However this IS a problem // when stepping through the test using a debugger, since other threads are // likely to run while the current thread is paused. Instead of needing a // debugger our printf here can help understand a failing test.
fprintf(stderr, "Check stats failed after iteration %u operation %s\n",
iteration, operation);
TEST(Jemalloc, StatsLite)
{ // Disable PHC allocations for this test, because even a single PHC // allocation occurring can throw it off.
AutoDisablePHCOnCurrentThread disable;
// Use this data to make an allocation, resize it twice, then free it. Some // The data uses a few size classes and does a combination of in-place and // moving reallocations. struct { // The initial allocation size.
size_t initial; // The first reallocation size and number of operations of the reallocation.
size_t next;
size_t next_ops; // The final reallocation size and number of operations of the reallocation.
size_t last;
size_t last_ops;
} TestData[] = { /* clang-format off */
{ 16, 15, 0, 256, 2},
{128_KiB, 64_KiB, 1, 68_KiB, 1},
{ 4_MiB, 16_MiB, 2, 3_MiB, 2},
{ 16_KiB, 512, 2, 32_MiB, 2}, /* clang-format on */
};
arena_id_t my_arena = moz_create_arena();
unsigned i = 0; for (auto data : TestData) { // Assert that the API returns /something/ a bit sensible.
jemalloc_stats_lite_t baseline;
jemalloc_stats_lite(&baseline);
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.2Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.