/*
 * Bounce a single page between NUMA nodes n1 and n2 for RUNTIME seconds.
 *
 * Returns 0 after the time budget elapses, -1 if clock_gettime() fails,
 * and -2 once move_pages() keeps failing past MAX_RETRIES (positive
 * return: pages that could not be moved) or errors outright (negative
 * return, errno set).
 */
int migrate(uint64_t *ptr, int n1, int n2)
{
	struct timespec start, now;
	int status = 0;
	int failures = 0;

	if (clock_gettime(CLOCK_MONOTONIC, &start))
		return -1;

	for (;;) {
		int rc;

		if (clock_gettime(CLOCK_MONOTONIC, &now))
			return -1;
		if (now.tv_sec - start.tv_sec >= RUNTIME)
			return 0;

		rc = move_pages(0, 1, (void **) &ptr, &n2, &status,
				MPOL_MF_MOVE_ALL);
		if (rc) {
			if (rc > 0) {
				/* Migration is best effort; try again */
				if (++failures < MAX_RETRIES)
					continue;
				printf("Didn't migrate %d pages\n", rc);
			} else {
				perror("Couldn't migrate pages");
			}
			return -2;
		}

		failures = 0;

		/* Swap source and destination nodes for the next pass. */
		rc = n2;
		n2 = n1;
		n1 = rc;
	}
}
/*
 * Spin forever reading the first word at ptr; pthread cancellation point
 * each iteration so the spawning thread can stop us.
 */
void *access_mem(void *ptr)
{
	for (;;) {
		pthread_testcancel();
		/*
		 * Force a read from the memory pointed to by ptr. This
		 * ensures the memory access actually happens and prevents
		 * the compiler from optimizing away this entire loop.
		 */
		FORCE_READ(*(uint64_t *)ptr);
	}

	return NULL;
}
/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try and access them triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
	uint64_t *ptr;
	pid_t pid;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	/*
	 * Fix: ptr was passed to memset() without ever being assigned.
	 * Map the private anonymous region under test first.
	 */
	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);

	/* Children read the buffer in a tight loop while we migrate it. */
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}
/*
 * Tests the pmd migration entry paths.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
	uint64_t *ptr;
	pid_t pid;
	int i;

	if (!thp_is_enabled())
		SKIP(return, "Transparent Hugepages not available");

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	/*
	 * Fix: ptr was used uninitialized. Over-map by one huge page so we
	 * can round up to a TWOMEG-aligned address, then ask for THP — a
	 * pmd-mapped page needs an aligned, MADV_HUGEPAGE'd range.
	 */
	ptr = mmap(NULL, 2 * TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	ptr = (uint64_t *)(((uintptr_t)ptr + TWOMEG - 1) &
			   ~(uintptr_t)(TWOMEG - 1));
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);

	memset(ptr, 0xde, TWOMEG);

	/* Children read the buffer in a tight loop while we migrate it. */
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}
/*
 * migration test with private anon hugetlb page
 */
TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME)
{
	uint64_t *ptr;
	pid_t pid;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	/*
	 * Fix: ptr was used uninitialized. A hugetlb test must map with
	 * MAP_HUGETLB; skip rather than fail when no huge pages are
	 * reserved (mmap returns MAP_FAILED with ENOMEM in that case).
	 */
	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (ptr == MAP_FAILED)
		SKIP(return, "Could not allocate hugetlb pages");

	memset(ptr, 0xde, TWOMEG);

	/* Children read the buffer in a tight loop while we migrate it. */
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.