/*
 * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
 *
 * "If the signal occurs other than as the result of calling the abort or raise
 * function, the behavior is undefined if the signal handler refers to any
 * object with static storage duration other than by assigning a value to an
 * object declared as volatile sig_atomic_t"
 */
/* Set while try_access_buf() is probing memory; read by handle_fatal(). */
static volatile sig_atomic_t signal_jump_set;
/* Jump target for siglongjmp() out of a fatal signal handler. */
static sigjmp_buf signal_jmp_buf;
/*
 * How is the mapping under test backed?
 */
enum backing_type {
	ANON_BACKED,		/* anonymous memory */
	SHMEM_BACKED,		/* shmem/memfd */
	LOCAL_FILE_BACKED,	/* a file on local disk */
};
/*
 * Per-test fixture state: the system page size, plus the path and file
 * descriptor of the backing file (used by the file/shmem-backed variants).
 */
FIXTURE(guard_regions)
{
	unsigned long page_size;
	char path[PATH_MAX];
	int fd;
};
/*
 * Fatal-signal handler: if a guarded access is in progress (signal_jump_set),
 * long-jump back to the sigsetjmp() point in try_access_buf(), passing the
 * signal number as the sigsetjmp() return value; otherwise do nothing.
 */
static void handle_fatal(int c)
{
	if (!signal_jump_set)
		return;

	siglongjmp(signal_jmp_buf, c);
}
/*
 * Wrapper for the process_madvise() system call, invoked directly via
 * syscall() since libc may not provide a wrapper.
 *
 * Returns the number of bytes advised on success, or -1 with errno set.
 */
static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
				   size_t n, int advice, unsigned int flags)
{
	return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags);
}
/*
 * Enable our signal catcher and try to read/write the specified buffer. The
 * return value indicates whether the read/write succeeds without a fatal
 * signal.
 */
static bool try_access_buf(char *ptr, bool write)
{
	bool failed;

	/* Tell signal handler to jump back here on fatal signal. */
	signal_jump_set = true;
	/* If a fatal signal arose, we will jump back here and failed is set. */
	failed = sigsetjmp(signal_jmp_buf, 0) != 0;

	if (!failed) {
		if (write)
			*ptr = 'x';
		else
			FORCE_READ(*ptr);
	}

	signal_jump_set = false;
	return !failed;
}
/* Try and read from a buffer, return true if no fatal signal. */
static bool try_read_buf(char *ptr)
{
	return try_access_buf(ptr, false);
}
/* Try and write to a buffer, return true if no fatal signal. */
static bool try_write_buf(char *ptr)
{
	return try_access_buf(ptr, true);
}
/*
 * Try and BOTH read from AND write to a buffer, return true if BOTH operations
 * succeed.
 */
static bool try_read_write_buf(char *ptr)
{
	return try_read_buf(ptr) && try_write_buf(ptr);
}
/*
 * Establish a varying pattern in a buffer: page i is filled with the
 * character 'a' + (i % 26).
 */
static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		char *ptr2 = &ptr[i * page_size];

		memset(ptr2, 'a' + (i % 26), page_size);
	}
}
/*
 * Check that a buffer contains the pattern set by set_pattern(), starting at a
 * page offset of pgoff within the buffer.
 */
static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
				 size_t pgoff)
{
	size_t i;

	for (i = 0; i < num_pages * page_size; i++) {
		size_t offset = pgoff * page_size + i;
		char actual = ptr[offset];
		char expected = 'a' + ((offset / page_size) % 26);

		if (actual != expected)
			return false;
	}

	return true;
}
/* Check that a buffer contains the pattern set by set_pattern(). */
static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
{
	return check_pattern_offset(ptr, num_pages, page_size, 0);
}
/*
 * Determine if a buffer contains only repetitions of a specified char.
 *
 * NOTE(review): the garbled original fell off the end of the function after
 * the loop (undefined behavior for a non-void function); the success path
 * must return true.
 */
static bool is_buf_eq(char *buf, size_t size, char chr)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (buf[i] != chr)
			return false;
	}

	return true;
}
/* Establish that 1st page SIGSEGV's. */
ASSERT_FALSE(try_read_write_buf(ptr));
/* Ensure we can touch everything else.*/ for (i = 1; i < NUM_PAGES; i++) { char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
/* Establish a guard page at the end of the mapping. */
ASSERT_EQ(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
MADV_GUARD_INSTALL), 0);
/* Check that both guard pages result in SIGSEGV. */
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
/* Remove the first guard page. */
ASSERT_FALSE(madvise(ptr, page_size, MADV_GUARD_REMOVE));
/* Make sure we can touch it. */
ASSERT_TRUE(try_read_write_buf(ptr));
/* Remove the last guard page. */
ASSERT_FALSE(madvise(&ptr[(NUM_PAGES - 1) * page_size], page_size,
MADV_GUARD_REMOVE));
/* Make sure we can touch it. */
ASSERT_TRUE(try_read_write_buf(&ptr[(NUM_PAGES - 1) * page_size]));
/* * Test setting a _range_ of pages, namely the first 3. The first of * these be faulted in, so this also tests that we can install guard * pages over backed pages.
*/
ASSERT_EQ(madvise(ptr, 3 * page_size, MADV_GUARD_INSTALL), 0);
/* Make sure they are all guard pages. */ for (i = 0; i < 3; i++) { char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Make sure the rest are not. */ for (i = 3; i < NUM_PAGES; i++) { char *curr = &ptr[i * page_size];
/* Assert that operations applied across multiple VMAs work as expected. */
TEST_F(guard_regions, multi_vma)
{ constunsignedlong page_size = self->page_size; char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3; int i;
/* Reserve a 100 page region over which we can install VMAs. */
ptr_region = mmap_(self, variant, NULL, 100 * page_size,
PROT_NONE, 0, 0);
ASSERT_NE(ptr_region, MAP_FAILED);
/* Place a VMA of 10 pages size at the start of the region. */
ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr1, MAP_FAILED);
/* Place a VMA of 5 pages size 50 pages into the region. */
ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr2, MAP_FAILED);
/* Place a VMA of 20 pages size at the end of the region. */
ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr3, MAP_FAILED);
/* * We end up with VMAs like this: * * 0 10 .. 50 55 .. 80 100 * [---] [---] [---]
*/
/* * Now mark the whole range as guard pages and make sure all VMAs are as * such.
*/
/* * madvise() is certifiable and lets you perform operations over gaps, * everything works, but it indicates an error and errno is set to * -ENOMEM. Also if anything runs out of memory it is set to * -ENOMEM. You are meant to guess which is which.
*/
ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), -1);
ASSERT_EQ(errno, ENOMEM);
for (i = 0; i < 10; i++) { char *curr = &ptr1[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
for (i = 0; i < 5; i++) { char *curr = &ptr2[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
for (i = 0; i < 20; i++) { char *curr = &ptr3[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Now remove guar pages over range and assert the opposite. */
/* * We end up with VMAs like this: * * 0 10 .. 50 55 .. 80 100 * [---][xxxx][---][xxxx][---] * * Where 'x' signifies VMAs that cannot be merged with those adjacent to * them.
*/
/* Multiple VMAs adjacent to one another should result in no error. */
ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_INSTALL), 0); for (i = 0; i < 100; i++) { char *curr = &ptr_region[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
ASSERT_EQ(madvise(ptr_region, 100 * page_size, MADV_GUARD_REMOVE), 0); for (i = 0; i < 100; i++) { char *curr = &ptr_region[i * page_size];
/* Now guard in one step. */
count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
/* OK we don't have permission to do this, skip. */ if (count == -1 && errno == EPERM)
SKIP(return, "No process_madvise() permissions, try running as root.\n");
/* Returns the number of bytes advised. */
ASSERT_EQ(count, 6 * page_size);
/* Assert that mprotect() operations have no bearing on guard markers. */
TEST_F(guard_regions, mprotect)
{ constunsignedlong page_size = self->page_size; char *ptr; int i;
/* Guard the middle of the range. */
ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
MADV_GUARD_INSTALL), 0);
/* Assert that it is indeed guarded. */
ASSERT_FALSE(try_read_write_buf(&ptr[5 * page_size]));
ASSERT_FALSE(try_read_write_buf(&ptr[6 * page_size]));
/* Now make these pages read-only. */
ASSERT_EQ(mprotect(&ptr[5 * page_size], 2 * page_size, PROT_READ), 0);
/* Make sure the range is still guarded. */
ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
/* Make sure we can guard again without issue.*/
ASSERT_EQ(madvise(&ptr[5 * page_size], 2 * page_size,
MADV_GUARD_INSTALL), 0);
/* Make sure the range is, yet again, still guarded. */
ASSERT_FALSE(try_read_buf(&ptr[5 * page_size]));
ASSERT_FALSE(try_read_buf(&ptr[6 * page_size]));
/* Now unguard the whole range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
/* Make sure the whole range is readable. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size];
/* Split and merge VMAs and make sure guard pages still behave. */
TEST_F(guard_regions, split_merge)
{ constunsignedlong page_size = self->page_size; char *ptr, *ptr_new; int i;
/* Make sure the whole range is guarded. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Now unmap some pages in the range so we split. */
ASSERT_EQ(munmap(&ptr[2 * page_size], page_size), 0);
ASSERT_EQ(munmap(&ptr[5 * page_size], page_size), 0);
ASSERT_EQ(munmap(&ptr[8 * page_size], page_size), 0);
/* Make sure the remaining ranges are guarded post-split. */ for (i = 0; i < 2; i++) { char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
} for (i = 2; i < 5; i++) { char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
} for (i = 6; i < 8; i++) { char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
} for (i = 9; i < 10; i++) { char *curr = &ptr[i * page_size];
ASSERT_FALSE(try_read_write_buf(curr));
}
/* Now map them again - the unmap will have cleared the guards. */
ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
/* Now make sure guard pages are established. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size]; bool result = try_read_write_buf(curr); bool expect_true = i == 2 || i == 5 || i == 8;
/* Back the whole range. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size];
*curr = 'y';
}
/* Guard every other page. */ for (i = 0; i < 10; i += 2) { char *curr = &ptr[i * page_size]; int res = madvise(curr, page_size, MADV_GUARD_INSTALL);
ASSERT_EQ(res, 0);
}
/* Indicate that we don't need any of the range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_DONTNEED), 0);
/* Check to ensure guard markers are still in place. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size]; bool result = try_read_buf(curr);
if (i % 2 == 0) {
ASSERT_FALSE(result);
} else {
ASSERT_TRUE(result); switch (variant->backing) { case ANON_BACKED: /* If anon, then we get a zero page. */
ASSERT_EQ(*curr, '\0'); break; default: /* Otherwise, we get the file data. */
ASSERT_EQ(*curr, 'y'); break;
}
}
/* Now write... */
result = try_write_buf(&ptr[i * page_size]);
/* ...and make sure same result. */
ASSERT_TRUE(i % 2 != 0 ? result : !result);
}
/* Now try to guard, should fail with EINVAL. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), -1);
ASSERT_EQ(errno, EINVAL);
/* OK unlock. */
ASSERT_EQ(munlock(ptr, 10 * page_size), 0);
/* Guard first half of range, should now succeed. */
ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
/* Make sure guard works. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size]; bool result = try_read_write_buf(curr);
if (i < 5) {
ASSERT_FALSE(result);
} else {
ASSERT_TRUE(result);
ASSERT_EQ(*curr, 'x');
}
}
/* * Now lock the latter part of the range. We can't lock the guard pages, * as this would result in the pages being populated and the guarding * would cause this to error out.
*/
ASSERT_EQ(mlock(&ptr[5 * page_size], 5 * page_size), 0);
/* * Now remove guard pages, we permit mlock()'d ranges to have guard * pages removed as it is a non-destructive operation.
*/
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
/* Now check that no guard pages remain. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size];
/* Place guard markers at both ends of the 5 page span. */
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
/* Make sure the guard pages are in effect. */
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* Map a new region we will move this range into. Doing this ensures * that we have reserved a range to map into.
*/
ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
/* Make sure the guard markers are retained. */
ASSERT_FALSE(try_read_write_buf(ptr_new));
ASSERT_FALSE(try_read_write_buf(&ptr_new[4 * page_size]));
/* * Clean up - we only need reference the new pointer as we overwrote the * PROT_NONE range and moved the existing one.
*/
munmap(ptr_new, 5 * page_size);
}
/*
 * Assert that moving, extending and shrinking memory via mremap() retains
 * guard markers where possible.
 *
 * Expanding should retain guard pages, only now in different position. The user
 * will have to remove guard pages manually to fix up (they'd have to do the
 * same if it were a PROT_NONE mapping).
 */
TEST_F(guard_regions, mremap_expand)
{
	const unsigned long page_size = self->page_size;
	char *ptr, *ptr_new;

	/* Map 10 pages... */
	ptr = mmap_(self, variant, NULL, 10 * page_size,
		    PROT_READ | PROT_WRITE, 0, 0);
	ASSERT_NE(ptr, MAP_FAILED);
	/* ...But unmap the last 5 so we can ensure we can expand into them. */
	ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);

	/* Place guard markers at both ends of the 5 page span. */
	ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
	ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);

	/* Make sure the guarding is in effect. */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/* Now expand to 10 pages. */
	ptr = mremap(ptr, 5 * page_size, 10 * page_size, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/*
	 * Make sure the guard markers are retained in their original positions.
	 */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/* Reserve a region which we can move to and expand into. */
	ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
	ASSERT_NE(ptr_new, MAP_FAILED);

	/* Now move and expand into it. */
	ptr = mremap(ptr, 10 * page_size, 20 * page_size,
		     MREMAP_MAYMOVE | MREMAP_FIXED, ptr_new);
	ASSERT_EQ(ptr, ptr_new);

	/*
	 * Again, make sure the guard markers are retained in their original
	 * positions.
	 */
	ASSERT_FALSE(try_read_write_buf(ptr));
	ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));

	/*
	 * A real user would have to remove guard markers, but would reasonably
	 * expect all characteristics of the mapping to be retained, including
	 * guard markers.
	 */

	/* Cleanup. */
	munmap(ptr, 20 * page_size);
}

/*
 * Assert that moving, extending and shrinking memory via mremap() retains
 * guard markers where possible.
 *
 * Shrinking will result in markers that are shrunk over being removed. Again,
 * if the user were using a PROT_NONE mapping they'd have to manually fix this
 * up also so this is OK.
 */
*/
TEST_F(guard_regions, mremap_shrink)
{ constunsignedlong page_size = self->page_size; char *ptr; int i;
/* Place guard markers at both ends of the 5 page span. */
ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
ASSERT_EQ(madvise(&ptr[4 * page_size], page_size, MADV_GUARD_INSTALL), 0);
/* Make sure the guarding is in effect. */
ASSERT_FALSE(try_read_write_buf(ptr));
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* Now shrink to 3 pages. */
ptr = mremap(ptr, 5 * page_size, 3 * page_size, MREMAP_MAYMOVE);
ASSERT_NE(ptr, MAP_FAILED);
/* We expect the guard marker at the start to be retained... */
ASSERT_FALSE(try_read_write_buf(ptr));
/* ...But remaining pages will not have guard markers. */ for (i = 1; i < 3; i++) { char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
/* * As with expansion, a real user would have to remove guard pages and * fixup. But you'd have to do similar manual things with PROT_NONE * mappings too.
*/
/* * If we expand back to the original size, the end marker will, of * course, no longer be present.
*/
ptr = mremap(ptr, 3 * page_size, 5 * page_size, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Again, we expect the guard marker at the start to be retained... */
ASSERT_FALSE(try_read_write_buf(ptr));
/* ...But remaining pages will not have guard markers. */ for (i = 1; i < 5; i++) { char *curr = &ptr[i * page_size];
ASSERT_TRUE(try_read_write_buf(curr));
}
/* Cleanup. */
munmap(ptr, 5 * page_size);
}
/* * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set * retain guard pages.
*/
TEST_F(guard_regions, fork)
{ constunsignedlong page_size = self->page_size; char *ptr;
pid_t pid; int i;
/* Establish guard pages in the first 5 pages. */
ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0);
pid = fork();
ASSERT_NE(pid, -1); if (!pid) { /* This is the child process now. */
/* Assert that the guarding is in effect. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size]; bool result = try_read_write_buf(curr);
ASSERT_TRUE(i >= 5 ? result : !result);
}
/* Now unguard the range.*/
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
exit(0);
}
/* Parent process. */
/* Parent simply waits on child. */
waitpid(pid, NULL, 0);
/* Child unguard does not impact parent page table state. */ for (i = 0; i < 10; i++) { char *curr = &ptr[i * page_size]; bool result = try_read_write_buf(curr);
/* * Assert expected behaviour after we fork populated ranges of anonymous memory * and then guard and unguard the range.
*/
TEST_F(guard_regions, fork_cow)
{ constunsignedlong page_size = self->page_size; char *ptr;
pid_t pid; int i;
if (variant->backing != ANON_BACKED)
SKIP(return, "CoW only supported on anon mappings");
/* Populate range. */ for (i = 0; i < 10 * page_size; i++) { char chr = 'a' + (i % 26);
ptr[i] = chr;
}
pid = fork();
ASSERT_NE(pid, -1); if (!pid) { /* This is the child process now. */
/* Ensure the range is as expected. */ for (i = 0; i < 10 * page_size; i++) { char expected = 'a' + (i % 26); char actual = ptr[i];
ASSERT_EQ(actual, expected);
}
/* Establish guard pages across the whole range. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); /* Remove it. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
/* * By removing the guard pages, the page tables will be * cleared. Assert that we are looking at the zero page now.
*/ for (i = 0; i < 10 * page_size; i++) { char actual = ptr[i];
ASSERT_EQ(actual, '\0');
}
exit(0);
}
/* Parent process. */
/* Parent simply waits on child. */
waitpid(pid, NULL, 0);
/* Ensure the range is unchanged in parent anon range. */ for (i = 0; i < 10 * page_size; i++) { char expected = 'a' + (i % 26); char actual = ptr[i];
/* * Assert that forking a process with VMAs that do have VM_WIPEONFORK set * behave as expected.
*/
TEST_F(guard_regions, fork_wipeonfork)
{ constunsignedlong page_size = self->page_size; char *ptr;
pid_t pid; int i;
if (variant->backing != ANON_BACKED)
SKIP(return, "Wipe on fork only supported on anon mappings");
/* Ensure that guard pages do not break userfaultd. */
TEST_F(guard_regions, uffd)
{ constunsignedlong page_size = self->page_size; int uffd; char *ptr; int i; struct uffdio_api api = {
.api = UFFD_API,
.features = 0,
}; struct uffdio_register reg; struct uffdio_range range;
if (!is_anon_backed(variant))
SKIP(return, "uffd only works on anon backing");
/* Set up uffd. */
uffd = userfaultfd(0); if (uffd == -1) { switch (errno) { case EPERM:
SKIP(return, "No userfaultfd permissions, try running as root."); break; case ENOSYS:
SKIP(return, "userfaultfd is not supported/not enabled."); break; default:
ksft_exit_fail_msg("userfaultfd failed with %s\n",
strerror(errno)); break;
}
}
/* * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we * aggressively read-ahead, then install guard regions and assert that it * behaves correctly. * * We page out using MADV_PAGEOUT before checking guard regions so we drop page * cache folios, meaning we maximise the possibility of some broken readahead.
*/
TEST_F(guard_regions, madvise_sequential)
{ char *ptr; int i; constunsignedlong page_size = self->page_size;
if (variant->backing == ANON_BACKED)
SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
/* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */
ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
ASSERT_NE(ptr_private, MAP_FAILED);
/* Set pattern in shared mapping. */
set_pattern(ptr_shared, 10, page_size);
/* Install guard regions in every other page in the shared mapping. */ for (i = 0; i < 10; i += 2) { char *ptr = &ptr_shared[i * page_size];
for (i = 0; i < 10; i++) { /* Every even shared page should be guarded. */
ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0); /* Private mappings should always be readable. */
ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
}
/* Install guard regions in every other page in the private mapping. */ for (i = 0; i < 10; i += 2) { char *ptr = &ptr_private[i * page_size];
for (i = 0; i < 10; i++) { /* Every even shared page should be guarded. */
ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0); /* Every odd private page should be guarded. */
ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
}
/* Remove guard regions from shared mapping. */
ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) { /* Shared mappings should always be readable. */
ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); /* Every even private page should be guarded. */
ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
}
/* Remove guard regions from private mapping. */
ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
for (i = 0; i < 10; i++) { /* Shared mappings should always be readable. */
ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); /* Private mappings should always be readable. */
ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
}
/* Now write out every other page to MAP_PRIVATE. */ for (i = 0; i < 10; i += 2) { char *ptr = &ptr_private[i * page_size];
memset(ptr, 'a' + i, page_size);
}
/* * At this point the mapping is: * * 0123456789 * SPSPSPSPSP * * Where S = shared, P = private mappings.
*/
/* Now mark the beginning of the mapping guarded. */
ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
/* * This renders the mapping: * * 0123456789 * xxxxxPSPSP
*/
for (i = 0; i < 10; i++) { char *ptr = &ptr_private[i * page_size];
/* Ensure guard regions as expected. */
ASSERT_EQ(try_read_buf(ptr), i >= 5); /* The shared mapping should always succeed. */
ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
}
/* Remove the guard regions altogether. */
ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
/* * * We now expect the mapping to be: * * 0123456789 * SSSSSPSPSP * * As we removed guard regions, the private pages from the first 5 will * have been zapped, so on fault will reestablish the shared mapping.
*/
for (i = 0; i < 10; i++) { char *ptr = &ptr_private[i * page_size];
/* * Assert that shared mappings in the MAP_PRIVATE mapping match * the shared mapping.
*/ if (i < 5 || i % 2 == 0) { char *ptr_s = &ptr_shared[i * page_size];
/* Test that guard regions established over a read-only mapping function correctly. */
TEST_F(guard_regions, readonly_file)
{ constunsignedlong page_size = self->page_size; char *ptr; int i;
if (variant->backing != LOCAL_FILE_BACKED)
SKIP(return, "Read-only test specific to file-backed");
/* Map shared so we can populate with pattern, populate it, unmap. */
ptr = mmap_(self, variant, NULL, 10 * page_size,
PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
set_pattern(ptr, 10, page_size);
ASSERT_EQ(munmap(ptr, 10 * page_size), 0); /* Close the fd so we can re-open read-only. */
ASSERT_EQ(close(self->fd), 0);
/* Now assert things are as expected. */ for (i = 0; i < 10; i++) { char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
}
/* Now truncate to actually used size (initialised to 100). */
ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
/* Here the guard regions will remain intact. */ for (i = 0; i < 10; i++) { char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
}
/* Now truncate to half the size, then truncate again to the full size. */
ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
/* Again, guard pages will remain intact. */ for (i = 0; i < 10; i++) { char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
}
/* Install a guard region in the middle of the mapping. */
ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
MADV_GUARD_INSTALL), 0);
/* * The buffer will now be: * * 0123456789 * ***xxxx*** * * Where * is data and x is the guard region.
*/
/* Ensure established. */ for (i = 0; i < 10; i++) { char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
}
/* Now hole punch the guarded region. */
ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
MADV_REMOVE), 0);
/* Ensure guard regions remain. */ for (i = 0; i < 10; i++) { char *ptr_p = &ptr[i * page_size];
ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
}
/* Now remove guard region throughout. */
ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
/* Check that the pattern exists in non-hole punched region. */
ASSERT_TRUE(check_pattern(ptr, 3, page_size)); /* Check that hole punched region is zeroed. */
ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0')); /* Check that the pattern exists in the remainder of the file. */
ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/* * Ensure that a memfd works correctly with guard regions, that we can write * seal it then open the mapping read-only and still establish guard regions * within, remove those guard regions and have everything work correctly.
*/
TEST_F(guard_regions, memfd_write_seal)
{ constunsignedlong page_size = self->page_size; char *ptr; int i;
if (variant->backing != SHMEM_BACKED)
SKIP(return, "memfd write seal test specific to shmem");
/* OK, we need a memfd, so close existing one. */
ASSERT_EQ(close(self->fd), 0);
/* Ensure pattern is as expected. */
ASSERT_TRUE(check_pattern(ptr, 10, page_size));
/* Ensure write seal intact. */ for (i = 0; i < 10; i++) { char *ptr_p = &ptr[i * page_size];
ASSERT_FALSE(try_write_buf(ptr_p));
}
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
/* * Since we are now permitted to establish guard regions in read-only anonymous * mappings, for the sake of thoroughness, though it probably has no practical * use, test that guard regions function with a mapping to the anonymous zero * page.
*/
TEST_F(guard_regions, anon_zeropage)
{ constunsignedlong page_size = self->page_size; char *ptr; int i;
if (!is_anon_backed(variant))
SKIP(return, "anon zero page test specific to anon/shmem");
/* Obtain a read-only i.e. anon zero page mapping. */
ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Now make every even page guarded. */ for (i = 0; i < 10; i += 2) { char *ptr_p = &ptr[i * page_size];
/* Read from pagemap, and assert no guard regions are detected. */ for (i = 0; i < 10; i++) { char *ptr_p = &ptr[i * page_size]; unsignedlong entry = pagemap_get_entry(proc_fd, ptr_p); unsignedlong masked = entry & PM_GUARD_REGION;
ASSERT_EQ(masked, 0);
}
/* Install a guard region in every other page. */ for (i = 0; i < 10; i += 2) { char *ptr_p = &ptr[i * page_size];
/* * Assert ioctl() returns the count of located regions, where each * region spans every other page within the range of 10 pages.
*/
ASSERT_EQ(ioctl(proc_fd, PAGEMAP_SCAN, &pm_scan_args), 5);
ASSERT_EQ(pm_scan_args.walk_end, (long)ptr + 10 * page_size);
/* Re-read from pagemap, and assert guard regions are detected. */ for (i = 0; i < 5; i++) { long ptr_p = (long)&ptr[2 * i * page_size];
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.