/*
 * Write one byte into a read-only private anon mapping via /proc/self/mem
 * (ptrace-style FOLL_FORCE write) and verify the byte becomes visible in
 * the mapping.
 *
 * NOTE(review): the original function tail was lost during text extraction
 * (the fragment ended right after the write check, with a dangling
 * "goto munmap"); the pass report and cleanup below are a minimal
 * reconstruction — confirm against the upstream selftest source.
 */
static void test_ptrace_write(void)
{
	char data = 1;
	char *mem;
	int ret;

	ksft_print_msg("[INFO] PTRACE write access\n");

	mem = mmap(NULL, pagesize, PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (mem == MAP_FAILED) {
		ksft_test_result_fail("mmap() failed\n");
		return;
	}

	/* Fault in the shared zeropage. */
	if (*mem != 0) {
		ksft_test_result_fail("Memory not zero\n");
		goto munmap;
	}

	/*
	 * Unshare the page (populating a fresh anon page that might be set
	 * dirty in the PTE) in the read-only VMA using ptrace (FOLL_FORCE).
	 */
	lseek(mem_fd, (uintptr_t) mem, SEEK_SET);
	ret = write(mem_fd, &data, 1);
	if (ret != 1 || *mem != data) {
		ksft_test_result_fail("write() failed\n");
		goto munmap;
	}

	ksft_test_result_pass("PTRACE write access\n");
munmap:
	munmap(mem, pagesize);
}
/*
 * Write one byte into the first subpage of a read-only THP-sized range via
 * /proc/self/mem (ptrace-style FOLL_FORCE write); skip when the kernel did
 * not actually populate a THP (checked via the last subpage).
 *
 * NOTE(review): the original function tail was lost during text extraction;
 * the pass report and cleanup below are a minimal reconstruction — confirm
 * against the upstream selftest source.
 */
static void test_ptrace_write_thp(void)
{
	char *mem, *mmap_mem;
	size_t mmap_size;
	char data = 1;
	int ret;

	ksft_print_msg("[INFO] PTRACE write access to THP\n");

	mem = mmap_thp_range(PROT_READ, &mmap_mem, &mmap_size);
	if (mem == MAP_FAILED)
		return;

	/*
	 * Write to the first subpage in the read-only VMA using
	 * ptrace(FOLL_FORCE), eventually placing a fresh THP that is marked
	 * dirty in the PMD.
	 */
	lseek(mem_fd, (uintptr_t) mem, SEEK_SET);
	ret = write(mem_fd, &data, 1);
	if (ret != 1 || *mem != data) {
		ksft_test_result_fail("write() failed\n");
		goto munmap;
	}

	/* MM populated a THP if we got the last subpage populated as well. */
	if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
		ksft_test_result_skip("Did not get a THP populated\n");
		goto munmap;
	}

	ksft_test_result_pass("PTRACE write access to THP\n");
munmap:
	munmap(mmap_mem, mmap_size);
}
mem = mmap(NULL, pagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
-1, 0); if (mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n"); return;
}
/* Populate a fresh page and dirty it. */
memset(mem, 1, pagesize); if (mprotect(mem, pagesize, PROT_READ)) {
ksft_test_result_fail("mprotect() failed\n"); goto munmap;
}
/* Trigger page migration. Might not be available or fail. */ if (syscall(__NR_mbind, mem, pagesize, MPOL_LOCAL, NULL, 0x7fful,
MPOL_MF_MOVE)) {
ksft_test_result_skip("mbind() failed\n"); goto munmap;
}
mem = mmap_thp_range(PROT_READ|PROT_WRITE, &mmap_mem, &mmap_size); if (mem == MAP_FAILED) return;
/* * Write to the first page, which might populate a fresh anon THP * and dirty it.
*/
memset(mem, 1, pagesize); if (mprotect(mem, thpsize, PROT_READ)) {
ksft_test_result_fail("mprotect() failed\n"); goto munmap;
}
/* MM populated a THP if we got the last subpage populated as well. */ if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
ksft_test_result_skip("Did not get a THP populated\n"); goto munmap;
}
/* Trigger page migration. Might not be available or fail. */ if (syscall(__NR_mbind, mem, thpsize, MPOL_LOCAL, NULL, 0x7fful,
MPOL_MF_MOVE)) {
ksft_test_result_skip("mbind() failed\n"); goto munmap;
}
mem = mmap_thp_range(PROT_READ|PROT_WRITE, &mmap_mem, &mmap_size); if (mem == MAP_FAILED) return;
/* * Write to the first page, which might populate a fresh anon THP * and dirty it.
*/
memset(mem, 1, pagesize); if (mprotect(mem, thpsize, PROT_READ)) {
ksft_test_result_fail("mprotect() failed\n"); goto munmap;
}
/* MM populated a THP if we got the last subpage populated as well. */ if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
ksft_test_result_skip("Did not get a THP populated\n"); goto munmap;
}
/* Trigger PTE-mapping the THP by mprotect'ing the last subpage. */ if (mprotect(mem + thpsize - pagesize, pagesize,
PROT_READ|PROT_WRITE)) {
ksft_test_result_fail("mprotect() failed\n"); goto munmap;
}
/* Place a page in a read-only VMA, which might set the PTE dirty. */
/*
 * NOTE(review): this span is a truncated fragment of a larger function
 * (presumably test_uffdio_copy(), called from the runner below). The setup
 * of `uffd`, `src`, `dst`, the `uffdio_copy` declaration, and the
 * `close_uffd` cleanup label all lie outside this excerpt.
 * "unsignedlong" below is a text-extraction artifact of the two tokens
 * "unsigned long" — restore the space when recovering the file.
 */
uffdio_copy.dst = (unsignedlong) dst;
uffdio_copy.src = (unsignedlong) src;
uffdio_copy.len = pagesize;
/* mode 0: no UFFDIO_COPY mode flags requested. */
uffdio_copy.mode = 0; if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy)) {
ksft_test_result_fail("UFFDIO_COPY failed\n"); goto close_uffd;
}
/*
 * NOTE(review): this span is the tail of the test runner (likely main());
 * the function header, the declaration of `err`, and the harness/setup code
 * that precedes this point were lost during text extraction — confirm
 * against the upstream selftest source.
 */
/*
 * On some ptrace(FOLL_FORCE) write access via /proc/self/mem in
 * read-only VMAs, the kernel may set the PTE/PMD dirty.
 */
test_ptrace_write(); if (thpsize)
test_ptrace_write_thp();
/*
 * On page migration, the kernel may set the PTE/PMD dirty when
 * remapping the page.
 */
test_page_migration(); if (thpsize)
test_page_migration_thp();
/* PTE-mapping a THP might propagate the dirty PMD bit to the PTEs. */
if (thpsize)
test_pte_mapped_thp();
/* Placing a fresh page via userfaultfd may set the PTE dirty. */
#ifdef __NR_userfaultfd
test_uffdio_copy();
#endif /* __NR_userfaultfd */
/* Report overall result: fail with a summary if any subtest failed. */
err = ksft_get_fail_cnt(); if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
ksft_exit_pass();
}
Messung V0.5
- Dauer der Verarbeitung: 0.11 Sekunden
(vorverarbeitet)
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.