// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stress userfaultfd syscall.
 *
 * Copyright (C) 2015  Red Hat, Inc.
 *
 * This test allocates two virtual areas and bounces the physical
 * memory across the two virtual areas (from area_src to area_dst)
 * using userfaultfd.
 *
 * There are three threads running per CPU:
 *
 * 1) one per-CPU thread takes a per-page pthread_mutex in a random
 *    page of the area_dst (while the physical page may still be in
 *    area_src), and increments a per-page counter in the same page,
 *    and checks its value against a verification region.
 *
 * 2) another per-CPU thread handles the userfaults generated by
 *    thread 1 above. userfaultfd blocking reads or poll() modes are
 *    exercised interleaved.
 *
 * 3) one last per-CPU thread transfers the memory in the background
 *    at maximum bandwidth (if not already transferred by thread
 *    2). Each cpu thread takes care of transferring a portion of the
 *    area.
 *
 * When all threads of type 3 completed the transfer, one bounce is
 * complete. area_src and area_dst are then swapped. All threads are
 * respawned and so the bounce is immediately restarted in the
 * opposite direction.
 *
 * per-CPU threads 1 by triggering userfaults inside
 * pthread_mutex_lock will also verify the atomicity of the memory
 * transfer (UFFDIO_COPY).
 */
/* exercise the test_uffdio_*_eexist every ALARM_INTERVAL_SECS */
#define ALARM_INTERVAL_SECS 10

/*
 * NOTE(review): presumably a page-sized zero-filled source buffer for
 * UFFDIO_COPY/zeropage operations — confirm at the allocation site.
 */
static char *zeropage;
/* Thread attributes shared by every worker thread this test spawns. */
pthread_attr_t attr;
/*
 * Exchange the values of two same-typed lvalues.  Spelled with __typeof__
 * (rather than GNU "typeof") so the macro also builds in strict -std=c11
 * mode; the do/while(0) wrapper makes it safe to use as a single statement
 * (e.g. as the body of an unbraced if/else).
 */
#define swap(a, b) \
	do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/* Usage examples printed by the help text; shell-ready command lines. */
const char *examples =
	"# Run anonymous memory test on 100MiB region with 99999 bounces:\n"
	"./uffd-stress anon 100 99999\n\n"
	"# Run share memory test on 1GiB region with 99 bounces:\n"
	"./uffd-stress shmem 1000 99\n\n"
	"# Run hugetlb memory test on 256MiB region with 50 bounces:\n"
	"./uffd-stress hugetlb 256 50\n\n"
	"# Run the same hugetlb test but using private file:\n"
	"./uffd-stress hugetlb-private 256 50\n\n"
	"# 10MiB-~6GiB 999 bounces anonymous test, "
	"continue forever unless an error triggers\n"
	"while ./uffd-stress anon $[RANDOM % 6000 + 10] 999; do true; done\n\n";
/* Copy the first half of the pages */
for (page_nr = start_nr; page_nr < mid_nr; page_nr++)
	copy_page_retry(uffd, page_nr * page_size);

/*
 * If we need to test uffd-wp, set it up now. Then we'll have
 * at least the first half of the pages mapped already which
 * can be write-protected for testing
 */
if (test_uffdio_wp)
	wp_range(uffd, (unsigned long)area_dst + start_nr * page_size,
		 nr_pages_per_cpu * page_size, true);

/*
 * Continue the 2nd half of the page copying, handling write
 * protection faults if any
 */
for (page_nr = mid_nr; page_nr < end_nr; page_nr++)
	copy_page_retry(uffd, page_nr * page_size);
finished = 0;
for (cpu = 0; cpu < nr_parallel; cpu++) {
	if (pthread_create(&locking_threads[cpu], &attr,
			   locking_thread, (void *)cpu))
		return 1;
	if (bounces & BOUNCE_POLL) {
		if (pthread_create(&uffd_threads[cpu], &attr,
				   uffd_poll_thread, &args[cpu]))
			err("uffd_poll_thread create");
	} else {
		if (pthread_create(&uffd_threads[cpu], &attr,
				   uffd_read_thread,
				   (void *)&args[cpu]))
			return 1;
		pthread_mutex_lock(&uffd_read_mutex);
	}
	if (pthread_create(&background_threads[cpu], &attr,
			   background_thread, (void *)cpu))
		return 1;
}
for (cpu = 0; cpu < nr_parallel; cpu++)
	if (pthread_join(background_threads[cpu], NULL))
		return 1;

/*
 * Be strict and immediately zap area_src, the whole area has
 * been transferred already by the background threads. The
 * area_src could then be faulted in a racy way by still
 * running uffdio_threads reading zeropages after we zapped
 * area_src (but they're guaranteed to get -EEXIST from
 * UFFDIO_COPY without writing zero pages into area_dst
 * because the background threads already completed).
 */
uffd_test_ops->release_pages(area_src);

finished = 1;
for (cpu = 0; cpu < nr_parallel; cpu++)
	if (pthread_join(locking_threads[cpu], NULL))
		return 1;

for (cpu = 0; cpu < nr_parallel; cpu++) {
	char c;
	if (bounces & BOUNCE_POLL) {
		/* Wake the poll()ing uffd thread via its per-CPU pipe. */
		if (write(pipefd[cpu*2+1], &c, 1) != 1)
			err("pipefd write error");
		if (pthread_join(uffd_threads[cpu],
				 (void *)&args[cpu]))
			return 1;
	} else {
		/* Blocking-read threads must be cancelled out of read(). */
		if (pthread_cancel(uffd_threads[cpu]))
			return 1;
		if (pthread_join(uffd_threads[cpu], NULL))
			return 1;
	}
}

if (area_dst_alias) {
	if (uffd_register(uffd, area_dst_alias, mem_size,
			  true, test_uffdio_wp, false))
		err("register failure alias");
}

/*
 * The madvise done previously isn't enough: some
 * uffd_thread could have read userfaults (one of
 * those already resolved by the background thread)
 * and it may be in the process of calling
 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
 * area_src and it would map a zero page in it (of
 * course such a UFFDIO_COPY is perfectly safe as it'd
 * return -EEXIST). The problem comes at the next
 * bounce though: that racing UFFDIO_COPY would
 * generate zeropages in the area_src, so invalidating
 * the previous MADV_DONTNEED. Without this additional
 * MADV_DONTNEED those zeropages leftovers in the
 * area_src would lead to -EEXIST failure during the
 * next bounce, effectively leaving a zeropage in the
 * area_dst.
 *
 * Try to comment this out madvise to see the memory
 * corruption being caught pretty quick.
 *
 * khugepaged is also inhibited to collapse THP after
 * MADV_DONTNEED only after the UFFDIO_REGISTER, so it's
 * required to MADV_DONTNEED here.
 */
uffd_test_ops->release_pages(area_dst);
if (!page_size)
	err("Unable to determine page size");
/*
 * The per-page bookkeeping (count word plus two extra long longs) must
 * fit inside a single page or the verification scheme cannot work.
 */
if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
    > page_size)
	err("Impossible to run this test");

/*
 * Whether we can test certain features depends not just on test type,
 * but also on whether or not this particular kernel supports the
 * feature.
 */
if (uffd_get_features(&features) && errno == ENOENT)
	ksft_exit_skip("failed to get available features (%d)\n", errno);
/*
 * NOTE(review): the following trailing text is German website-disclaimer
 * boilerplate left over from an HTML extraction and is not part of the
 * test source; it is kept verbatim here as a comment (so the file remains
 * compilable) pending removal:
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell."
 */