// SPDX-License-Identifier: GPL-2.0
/*
 * A test for the patch "Allow compaction of unevictable pages".
 * With this patch we should be able to allocate at least 1/4
 * of RAM in huge pages. Without the patch much less is
 * allocated.
 */
/*
 * We want to test with 80% of available memory. Else, the OOM killer
 * comes into play.
 */
mem_free = mem_free * 0.8;

/*
 * nr_hugepages is the sysctl knob for the persistent huge page pool.
 * NOTE(review): most error paths below 'goto close_fd' without setting
 * ret — presumably ret is initialized to -1 at its declaration (not
 * visible in this chunk); confirm.
 */
fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK); if (fd < 0) {
ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
ret = -1; goto out;
}
/*
 * Request huge pages for about half of the free memory. The kernel
 * will allocate as much as it can, and we expect it to get at least
 * one third (see the compaction_index check below).
 */
nr_hugepages_ul = mem_free / hugepage_size / 2;
snprintf(target_nr_hugepages, sizeof(target_nr_hugepages), "%lu", nr_hugepages_ul);
slen = strlen(target_nr_hugepages); if (write(fd, target_nr_hugepages, slen) != slen) {
ksft_print_msg("Failed to write %lu to /proc/sys/vm/nr_hugepages: %s\n",
nr_hugepages_ul, strerror(errno)); goto close_fd;
}
/* Rewind and read back how many huge pages the kernel actually gave us. */
lseek(fd, 0, SEEK_SET);
if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
ksft_print_msg("Failed to re-read from /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno)); goto close_fd;
}
/*
 * We should have been able to request at least 1/3rd of the memory in
 * huge pages.
 * NOTE(review): if read() filled nr_hugepages completely, the buffer may
 * not be NUL-terminated before strtoul — presumably it is zero-initialized
 * at its declaration (not visible here); confirm.
 */
nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10); if (!nr_hugepages_ul) {
ksft_print_msg("ERROR: No memory is available as huge pages\n"); goto close_fd;
}
/*
 * compaction_index ~= mem_free / (memory obtained as huge pages): an
 * index of N means roughly 1/N of the tested memory became huge pages.
 */
compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);
/* Restore the nr_hugepages value that was saved before the test. */
lseek(fd, 0, SEEK_SET);
if (write(fd, init_nr_hugepages, strlen(init_nr_hugepages))
!= strlen(init_nr_hugepages)) {
ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno)); goto close_fd;
}
ksft_print_msg("Number of huge pages allocated = %lu\n",
nr_hugepages_ul);
/* Fail when less than about a third of memory ended up as huge pages. */
if (compaction_index > 3) {
ksft_print_msg("ERROR: Less than 1/%d of memory is available\n" "as huge pages\n", compaction_index); goto close_fd;
}
/*
 * set_zero_hugepages - save the current nr_hugepages value and reset it to 0.
 * @initial_nr_hugepages: out parameter; receives the value read from
 *                        /proc/sys/vm/nr_hugepages before it is zeroed.
 *
 * Starting with zero persistent huge pages gives the test a clean baseline
 * (pre-reserved huge pages would otherwise reduce the free memory measured
 * later).
 *
 * Returns 0 on success, -1 on failure.
 */
int set_zero_hugepages(unsigned long *initial_nr_hugepages)
{
	int fd, ret = -1;
	char nr_hugepages[20] = {0};

	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto out;
	}
	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
		ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto close_fd;
	}

	lseek(fd, 0, SEEK_SET);

	/* Start with the initial condition of 0 huge pages */
	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
		ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
			       strerror(errno));
		goto close_fd;
	}

	*initial_nr_hugepages = strtoul(nr_hugepages, NULL, 10);
	ret = 0;

close_fd:
	close(fd);
out:
	return ret;
}
/* Skip unless the test prerequisites are met and we run as root. */
if (prereq() || geteuid())
ksft_exit_skip("Prerequisites unsatisfied\n");
ksft_set_plan(1);
/* Start the test without huge pages reducing mem_free */
if (set_zero_hugepages(&initial_nr_hugepages))
ksft_exit_fail();
/*
 * Lift the mlock limit so the MAP_LOCKED fragmentation mappings made
 * later cannot fail on RLIMIT_MEMLOCK.
 */
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY; if (setrlimit(RLIMIT_MEMLOCK, &lim))
ksft_exit_fail_msg("Failed to set rlimit: %s\n", strerror(errno));
page_size = getpagesize();
/* Fetch free memory and huge page size (presumably from /proc/meminfo —
 * see read_memory_info, defined elsewhere in this file). */
if (read_memory_info(&mem_free, &hugepage_size) != 0)
ksft_exit_fail_msg("Failed to get meminfo\n");
/*
 * Track each fragmenting mapping in a linked list so it can be unmapped
 * after compaction is measured.  On allocation failure, release the map
 * and stop fragmenting.
 * Fix: the original read "unsignedlong" (missing space) in the KSM
 * scribble loop below, which is not valid C.
 */
entry = malloc(sizeof(struct map_list));
if (!entry) {
	munmap(map, MAP_SIZE);
	break;
}
entry->map = map;
entry->next = list;
list = entry;

/*
 * Write something (in this case the address of the map) to ensure
 * that KSM can't merge the mapped pages.
 */
for (i = 0; i < MAP_SIZE; i += page_size)
	*(unsigned long *)(map + i) = (unsigned long)map + i;

mem_fragmentable_MB -= MAP_SIZE_MB;
}
/*
 * Release every fragmenting mapping before measuring compaction.
 * Fix: the original loop advanced 'entry' both in its body and in the
 * for-increment, so only every other mapping was actually unmapped.
 * The map_list nodes themselves are not freed; the process exits right
 * after the check below.
 */
for (entry = list; entry != NULL; entry = entry->next)
	munmap(entry->map, MAP_SIZE);
if (check_compaction(mem_free, hugepage_size,
		     initial_nr_hugepages) == 0)
	ksft_exit_pass();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.