// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch_timer_edge_cases.c - Tests the aarch64 timer IRQ functionality.
 *
 * The test validates some edge cases related to the arch-timer:
 * - timers above the max TVAL value.
 * - timers in the past.
 * - moving counters ahead and behind pending timers.
 * - reprogramming timers.
 * - timers fired multiple times.
 * - masking/unmasking using the timer control mask.
 *
 * Copyright (c) 2021, Google LLC.
 */
/* Run-time configuration shared by all the timer edge-case tests. */
struct test_args {
	/* Virtual or physical timer and counter tests. */
	enum arch_timer timer;
	/* Delay used for most timer tests. */
	uint64_t wait_ms;
	/* Delay used in the test_long_timer_delays test. */
	uint64_t long_wait_ms;
	/* Number of iterations. */
	int iterations;
	/* Whether to test the physical timer. */
	bool test_physical;
	/* Whether to test the virtual timer. */
	bool test_virtual;
};
/*
 * Program an IRQ for @timer through the requested timer view @tv: either
 * the comparator register (CVAL) or the down-counter register (TVAL).
 * @ctl is the timer control value to program alongside @xval.
 */
static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
			 enum timer_view tv)
{
	switch (tv) {
	case TIMER_CVAL:
		set_cval_irq(timer, xval, ctl);
		break;
	case TIMER_TVAL:
		set_tval_irq(timer, xval, ctl);
		break;
	default:
		GUEST_FAIL("Could not get timer %d", timer);
	}
}
/*
 * Block (with wfi) until the IRQ handler increments shared_data.handled,
 * i.e. until a non-spurious IRQ has actually been handled.
 *
 * Returns with local IRQs disabled.
 *
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void wait_for_non_spurious_irq(void)
{
	int h;

	local_irq_disable();

	for (h = atomic_read(&shared_data.handled);
	     h == atomic_read(&shared_data.handled);) {
		wfi();
		local_irq_enable();
		isb(); /* handle IRQ */
		local_irq_disable();
	}
}
/*
 * Wait for a non-spurious IRQ by polling in the guest or in
 * userspace (e.g. usp_cmd=USERSPACE_SCHED_YIELD).
 *
 * Returns with local IRQs disabled.
 *
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void poll_for_non_spurious_irq(enum sync_cmd usp_cmd)
{
	int h;

	local_irq_disable();

	/* Snapshot the handled count with IRQs masked, then poll unmasked. */
	h = atomic_read(&shared_data.handled);

	local_irq_enable();
	while (h == atomic_read(&shared_data.handled)) {
		if (usp_cmd == NO_USERSPACE_CMD)
			cpu_relax();
		else
			userspace_cmd(usp_cmd);
	}
	local_irq_disable();
}
/*
 * Reset the timer state to some nice values like the counter not being close
 * to the edge, and the control register masked and disabled.
 */
static void reset_timer_state(enum arch_timer timer, uint64_t cnt)
{
	set_counter(timer, cnt);
	timer_set_ctl(timer, CTL_IMASK);
}
if (reset_state)
reset_timer_state(timer, reset_cnt);
set_xval_irq(timer, xval, CTL_ENABLE, tv);
/* This method re-enables IRQs to handle the one we're looking for. */
wm();
assert_irqs_handled(1);
local_irq_enable();
}
/* * The test_timer_* functions will program the timer, wait for it, and assert * the firing of the correct IRQ. * * These functions don't have a timeout and return as soon as they receive an * IRQ. They can hang (forever), so we rely on having a timeout mechanism in * the "runner", like: tools/testing/selftests/kselftest/runner.sh.
*/
/*
 * Program a timer via TVAL and check that no IRQ arrives within @usec
 * microseconds.
 */
static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec,
			     sleep_method_t wm)
{
	/* tval will be cast to an int32_t in test_xval_check_no_irq */
	test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm);
}
/* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */
static void test_timer_control_mask_then_unmask(enum arch_timer timer)
{
	reset_timer_state(timer, DEF_CNT);

	/* Arm an already-expired timer (tval=-1), but keep it masked. */
	set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);

	/* Unmask the timer, and then get an IRQ. */
	local_irq_disable();
	timer_set_ctl(timer, CTL_ENABLE);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wait_for_non_spurious_irq();

	assert_irqs_handled(1);
	local_irq_enable();
}
/* Check that timer control masks actually mask a timer being fired. */ staticvoid test_timer_control_masks(enum arch_timer timer)
{
reset_timer_state(timer, DEF_CNT);
/* Local IRQs are not masked at this point. */
set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);
/* Assume no IRQ after waiting TIMEOUT_NO_IRQ_US microseconds */
sleep_poll(timer, TIMEOUT_NO_IRQ_US);
for (i = 1; i <= num; i++) { /* This method re-enables IRQs to handle the one we're looking for. */
wm();
/* The IRQ handler masked and disabled the timer. * Enable and unmmask it again.
*/
timer_set_ctl(timer, CTL_ENABLE);
assert_irqs_handled(i);
}
local_irq_enable();
}
/* Fire a timer 10 times in a row with every IRQ-wait method. */
static void test_timers_fired_multiple_times(enum arch_timer timer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++)
		test_fire_a_timer_multiple_times(timer, irq_wait_method[i], 10);
}
/* * Set a timer for tval=delta_1_ms then reprogram it to * tval=delta_2_ms. Check that we get the timer fired. There is no * timeout for the wait: we use the wfi instruction.
*/ staticvoid test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm,
int32_t delta_1_ms, int32_t delta_2_ms)
{
local_irq_disable();
reset_timer_state(timer, DEF_CNT);
/* Program the timer to DEF_CNT + delta_1_ms. */
set_tval_irq(timer, msec_to_cycles(delta_1_ms), CTL_ENABLE);
/* Reprogram the timer to DEF_CNT + delta_2_ms. */
timer_set_tval(timer, msec_to_cycles(delta_2_ms));
/* This method re-enables IRQs to handle the one we're looking for. */
wm();
/* The IRQ should arrive at DEF_CNT + delta_2_ms (or after). */
GUEST_ASSERT(timer_get_cntct(timer) >=
DEF_CNT + msec_to_cycles(delta_2_ms));
for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) { /* * Ensure reprogramming works whether going from a * longer time to a shorter or vice versa.
*/
test_reprogramming_timer(timer, irq_wait_method[i], 2 * base_wait,
base_wait);
test_reprogramming_timer(timer, irq_wait_method[i], base_wait,
2 * base_wait);
}
}
/*
 * This test checks basic timer behavior without actually firing timers, things
 * like: the relationship between cval and tval, tval down-counting.
 * When @use_sched is true, the vCPU is migrated between each step to check
 * how KVM saves/restores these edge-case values.
 */
static void timers_sanity_checks(enum arch_timer timer, bool use_sched)
{
	reset_timer_state(timer, DEF_CNT);

	local_irq_disable();

	/* cval in the past */
	timer_set_cval(timer,
		       timer_get_cntct(timer) -
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	/* tval in the past */
	timer_set_tval(timer, -1);
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) < timer_get_cntct(timer));

	/*
	 * tval larger than TVAL_MAX. This requires programming with
	 * timer_set_cval instead so the value is expressible.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <= 0);

	/*
	 * tval larger than 2 * TVAL_MAX.
	 * Twice the TVAL_MAX completely loops around the TVAL.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + 2ULL * TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <=
		     msec_to_cycles(test_args.wait_ms));

	/* negative tval that rollovers from 0. */
	set_counter(timer, msec_to_cycles(1));
	timer_set_tval(timer, -1 * msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) >=
		     (CVAL_MAX - msec_to_cycles(test_args.wait_ms)));

	/* tval should keep down-counting from 0 to -1. */
	timer_set_tval(timer, 0);
	sleep_poll(timer, 1);
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	local_irq_enable();

	/* Mask and disable any pending timer. */
	timer_set_ctl(timer, CTL_IMASK);
}
/* Run the sanity checks with and without vCPU migration. */
static void test_timers_sanity_checks(enum arch_timer timer)
{
	timers_sanity_checks(timer, false);

	/* Check how KVM saves/restores these edge-case values. */
	timers_sanity_checks(timer, true);
}
/* This method re-enables IRQs to handle the one we're looking for. */
wm();
assert_irqs_handled(1);
local_irq_enable();
}
/* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */
static void test_timers_above_tval_max(enum arch_timer timer)
{
	uint64_t cval;
	int i;

	/*
	 * Test that the system is not implementing cval in terms of
	 * tval. If that was the case, setting a cval to "cval = now
	 * + TVAL_MAX + wait_ms" would wrap to "cval = now +
	 * wait_ms", and the timer would fire immediately. Test that it
	 * doesn't.
	 */
	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		reset_timer_state(timer, DEF_CNT);
		cval = timer_get_cntct(timer) + TVAL_MAX +
			msec_to_cycles(test_args.wait_ms);
		test_cval_no_irq(timer, cval,
				 msecs_to_usecs(test_args.wait_ms) +
				 TIMEOUT_NO_IRQ_US, sleep_method[i]);
	}

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		/* Get the IRQ by moving the counter forward. */
		test_set_cnt_after_tval_max(timer, irq_wait_method[i]);
	}
}
/*
 * Template function to be used by the test_move_counter_ahead_* tests. It
 * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and
 * then waits for an IRQ.
 *
 * Fix: the corrupted copy never used @cnt_1 — the initial counter setup
 * (and masking of any stale timer) documented above was missing; restored.
 */
static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1,
				    uint64_t xval, uint64_t cnt_2,
				    irq_wait_method_t wm, enum timer_view tv)
{
	local_irq_disable();

	set_counter(timer, cnt_1);
	timer_set_ctl(timer, CTL_IMASK);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);
	set_counter(timer, cnt_2);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}
/* * Template function to be used by the test_move_counter_ahead_* tests. It * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and * then waits for an IRQ.
*/ staticvoid test_set_cnt_after_xval_no_irq(enum arch_timer timer,
uint64_t cnt_1, uint64_t xval,
uint64_t cnt_2,
sleep_method_t guest_sleep, enum timer_view tv)
{
local_irq_disable();
/* * Program a timer, mask it, and then change the tval or counter to cancel it. * Unmask it and check that nothing fires.
*/ staticvoid test_move_counters_behind_timers(enum arch_timer timer)
{ int i;
for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
sleep_method_t sm = sleep_method[i];
for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
irq_wait_method_t wm = irq_wait_method[i];
/* set a timer wait_ms the past. */
cval = DEF_CNT - msec_to_cycles(test_args.wait_ms);
test_timer_cval(timer, cval, wm, true, DEF_CNT);
test_timer_tval(timer, tval, wm, true, DEF_CNT);
/* Set a timer to counter=0 (in the past) */
test_timer_cval(timer, 0, wm, true, DEF_CNT);
/* Set a time for tval=0 (now) */
test_timer_tval(timer, 0, wm, true, DEF_CNT);
/* Set a timer to as far in the past as possible */
test_timer_tval(timer, TVAL_MIN, wm, true, DEF_CNT);
}
/* * Set the counter to wait_ms, and a tval to -wait_ms. There should be no * IRQ as that tval means cval=CVAL_MAX-wait_ms.
*/ for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
sleep_method_t sm = sleep_method[i];
/* Print usage information for the command-line options. */
static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-b] [-i iterations] [-l long_wait_ms] [-p] [-v]\n"
		, name);
	pr_info("\t-i: Number of iterations (default: %u)\n",
		NR_TEST_ITERS_DEF);
	pr_info("\t-b: Test both physical and virtual timers (default: true)\n");
	pr_info("\t-l: Delta (in ms) used for long wait time test (default: %u)\n",
		LONG_WAIT_TEST_MS);
	pr_info("\t-w: Delta (in ms) used for wait times (default: %u)\n",
		WAIT_TEST_MS);
	pr_info("\t-p: Test physical timer (default: true)\n");
	pr_info("\t-v: Test virtual timer (default: true)\n");
	pr_info("\t-h: Print this help message\n");
}
staticbool parse_args(int argc, char *argv[])
{ int opt;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.