/* Busy loop in userspace to elapse ITIMER_VIRTUAL */
static void user_loop(void)
{
	/*
	 * Spin in pure userspace until the signal handler sets 'done'.
	 * NOTE(review): 'done' is defined elsewhere; it must be volatile or
	 * atomic there, otherwise the compiler may hoist the load out of
	 * this loop — confirm at its definition.
	 */
	while (!done)
		;
}
/*
 * Try to spend as much time as possible in kernelspace
 * to elapse ITIMER_PROF.
 *
 * NOTE(review): this definition is truncated in this chunk — only the
 * opening of the body is visible. 'staticvoid' is also a fused token
 * (missing space) that will not compile; left untouched pending the
 * full function text.
 */ staticvoid kernel_loop(void)
{ void *addr = sbrk(0); int err = 0; /* sbrk(0) reads the current break without moving it */
/*
 * Check the expected timer expiration matches the GTOD elapsed delta since
 * we armed the timer. Keep a 0.5 sec error margin due to various jitter.
 *
 * NOTE(review): only the signature and the first declaration are visible
 * here; the body is truncated. 'staticint' and 'longlong' are fused
 * tokens (missing spaces) and will not compile as-is.
 */ staticint check_diff(struct timeval start, struct timeval end)
{ longlong diff;
/*
 * NOTE(review): tail of the signal-distribution helper thread routine
 * (its signature is missing from this chunk); it returns NULL, so it is
 * presumably a pthread start routine — confirm against the full file.
 */
/*
 * 1/10 seconds to ensure the leader sleeps.
 * NOTE(review): usleep(10000) is 10ms, not 1/10s — the comment and the
 * value disagree; confirm the intended delay.
 */
usleep(10000);
/*
 * Arm a process-wide CPU-time timer, then spin until ctd_count reaches
 * zero (presumably decremented by the SIGALRM handler — confirm) or a
 * failure is flagged.
 */
ctd_count = 100; if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id))
fatal_error(NULL, "timer_create()"); if (timer_settime(id, 0, &val, NULL))
fatal_error(NULL, "timer_settime()"); while (ctd_count > 0 && !ctd_failed)
;
if (timer_delete(id))
fatal_error(NULL, "timer_delete()");
return NULL;
}
/*
 * Test that only the running thread receives the timer signal.
 */
static void check_timer_distribution(void)
{
	/* ctd_sighandler and ctd_thread_func are defined elsewhere in this file */
	if (signal(SIGALRM, ctd_sighandler) == SIG_ERR)
		fatal_error(NULL, "signal()");

	if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL))
		fatal_error(NULL, "pthread_create()");
	if (pthread_join(ctd_thread, NULL))
		fatal_error(NULL, "pthread_join()");

	/*
	 * Kernels >= 6.3 are expected to pass; on older kernels the known
	 * mis-distribution is reported as a skip instead of a failure.
	 */
	if (!ctd_failed)
		ksft_test_result_pass("check signal distribution\n");
	else if (ksft_min_kernel_version(6, 3))
		ksft_test_result_fail("check signal distribution\n");
	else
		ksft_test_result_skip("check signal distribution (old kernel)\n");
}
/*
 * NOTE(review): interior of a signal-ignore test (the enclosing function
 * signature is not visible in this chunk). It presumably verifies that a
 * periodic posix timer keeps firing while SIGUSR1 is ignored/blocked and
 * that delivery resumes once the handler is restored and the signal
 * unblocked — confirm against the full file.
 */
/* Block SIGUSR1 so delivery is deferred until explicitly unblocked below */
sigemptyset(&set);
sigaddset(&set, SIGUSR1); if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_BLOCK)");
if (timer_create(CLOCK_MONOTONIC, &sev, &timerid))
fatal_error(NULL, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000;
/* NOTE(review): return value unchecked here, unlike other call sites */
timer_settime(timerid, 0, &its, NULL);
/* Let several 100ms expirations accumulate while the signal is blocked */
sleep(1);
/* Set the signal to be ignored */ if (signal(SIGUSR1, SIG_IGN) == SIG_ERR)
fatal_error(NULL, "signal(SIG_IGN)");
sleep(1);
if (thread) { /* Stop the thread first. No signal should be delivered to it */ if (pthread_cancel(pthread))
fatal_error(NULL, "pthread_cancel()"); if (pthread_join(pthread, NULL))
fatal_error(NULL, "pthread_join()");
}
/* Restore the handler */ if (sigaction(SIGUSR1, &sa, NULL))
fatal_error(NULL, "sigaction()");
sleep(1);
/* Unblock it, which should deliver the signal in the !thread case*/ if (sigprocmask(SIG_UNBLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_UNBLOCK)");
if (timer_delete(timerid))
fatal_error(NULL, "timer_delete()");
/*
 * NOTE(review): the next two stanzas are byte-for-byte duplicates of the
 * "block the signal" preamble used above; they look like the openings of
 * separate test functions whose surrounding code was lost in this chunk.
 */
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1); if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_BLOCK)");
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1); if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(NULL, "sigprocmask(SIG_BLOCK)");
/*
 * NOTE(review): interior of a timer_gettime() countdown test (the
 * enclosing function signature is not visible; 'which' and 'name' are
 * presumably its clock-id and label parameters — confirm).
 */
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1); if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(name, "sigprocmask(SIG_BLOCK)");
if (timer_create(which, &sev, &timerid))
fatal_error(name, "timer_create()");
/* Start the timer to expire in 100ms and 100ms intervals */
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = 100000000;
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 100000000; if (timer_settime(timerid, 0, &its, NULL))
fatal_error(name, "timer_settime()");
if (timer_gettime(timerid, &prev))
fatal_error(name, "timer_gettime()");
if (clock_gettime(which, &start))
fatal_error(name, "clock_gettime()");
/*
 * Sample the remaining time for ~1 second of 'which' clock time; each
 * time it_value.tv_nsec jumps upward the 100ms period must have
 * re-armed, so count it as a wrap.
 */
do { if (clock_gettime(which, &now))
fatal_error(name, "clock_gettime()"); if (timer_gettime(timerid, &its))
fatal_error(name, "timer_gettime()"); if (its.it_value.tv_nsec > prev.it_value.tv_nsec)
wraps++;
prev = its;
} while (calcdiff_ns(now, start) < NSEC_PER_SEC);
if (timer_delete(timerid))
fatal_error(name, "timer_delete()");
/*
 * NOTE(review): another orphaned "block the signal" preamble, this one
 * reporting via 'name' — likely the start of a further per-clock test
 * whose body is missing from this chunk.
 */
/* Block the signal */
sigemptyset(&set);
sigaddset(&set, SIGUSR1); if (sigprocmask(SIG_BLOCK, &set, NULL))
fatal_error(name, "sigprocmask(SIG_BLOCK)");
/*
 * NOTE(review): tail of a "timer create exact" test (the function header
 * is missing from this chunk). It expects the previously created timer
 * to have been assigned ID 8, then checks that a subsequent normal
 * create/delete cycle allocates ID 9.
 */
if (id != 8) {
ksft_test_result_fail("check timer create exact %d != 8\n", id); return;
}
/* Validate that it went back to normal mode and allocates ID 9 */ if (do_timer_create(&id) < 0)
fatal_error(NULL, "timer_create()");
if (do_timer_delete(id))
fatal_error(NULL, "timer_delete()");
if (id == 9)
ksft_test_result_pass("check timer create exact\n"); else
ksft_test_result_fail("check timer create exact. Disabling failed.\n");
}
/* Entry point: prints the kselftest plan, then runs the posix timer tests. */
int main(int argc, char **argv)
{
ksft_print_header();
ksft_set_plan(19);
ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n");
ksft_print_msg("based timers if other threads run on the CPU...\n");
/*
 * It's unfortunately hard to reliably test a timer expiration
 * on parallel multithread cputime. We could arm it to expire
 * on DELAY * nr_threads, with nr_threads busy looping, then wait
 * the normal DELAY since the time is elapsing nr_threads faster.
 * But for that we need to ensure we have real physical free CPUs
 * to ensure true parallelism. So test only one thread until we
 * find a better solution.
 */
check_timer_create(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
check_timer_distribution();
/*
 * NOTE(review): main() is truncated here — the remaining test calls,
 * the ksft finalization/return, and the closing brace are missing from
 * this chunk. The declared plan of 19 tests cannot be satisfied by the
 * two calls visible above.
 */
/*
 * NOTE(review): the following German website-disclaimer text is clearly
 * extraneous content accidentally pasted into this source file; it is
 * preserved here as a comment so the file remains parseable.
 * Translation: "The information on this website was carefully compiled
 * to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Note: The colored syntax highlighting and the measurement are still
 * experimental."
 */