ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter, 1, start_flags,
ival, 0, 0);
__GUEST_ASSERT(ret.error == 0, "Unable to start counter %ld\n", counter);
}
/* This should be invoked only for reset counter use case */ staticvoid stop_reset_counter(unsignedlong counter, unsignedlong stop_flags)
{ struct sbiret ret;
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
__GUEST_ASSERT(ret.error == 0, "Unable to retrieve number of counters from SBI PMU");
__GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS, "Invalid number of counters %ld\n", ret.value);
return ret.value;
}
staticvoid update_counter_info(int num_counters)
{ int i = 0; struct sbiret ret;
for (i = 0; i < num_counters; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
/* There can be gaps in logical counter indicies*/ if (ret.error) continue;
GUEST_ASSERT_NE(ret.value, 0);
/* Do not set the initial value */
start_counter(counter, 0, 0);
dummy_func_loop(10000);
stop_counter(counter, 0);
counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
__GUEST_ASSERT(counter_value_post > counter_value_pre, "Event update verification failed: post [%lx] pre [%lx]\n",
counter_value_post, counter_value_pre);
/* * We can't just update the counter without starting it. * Do start/stop twice to simulate that by first initializing to a very * high value and a low value after that.
*/
start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, ULONG_MAX/2);
stop_counter(counter, 0);
counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);
/* Now set the initial value and compare */
start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
dummy_func_loop(10000);
stop_counter(counter, 0);
counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
__GUEST_ASSERT(counter_value_post > counter_init_value, "Event update verification failed: post [%lx] pre [%lx]\n",
counter_value_post, counter_init_value);
/* Do not set the initial value */
start_counter(counter, 0, 0);
dummy_func_loop(10000);
stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
/* The counter value is updated w.r.t relative index of cbase */
counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
__GUEST_ASSERT(counter_value_post > counter_value_pre, "Event update verification failed: post [%lx] pre [%lx]\n",
counter_value_post, counter_value_pre);
/* * We can't just update the counter without starting it. * Do start/stop twice to simulate that by first initializing to a very * high value and a low value after that.
*/
WRITE_ONCE(snapshot_data->ctr_values[0], ULONG_MAX/2);
start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
counter_value_pre = READ_ONCE(snapshot_data->ctr_values[0]);
/* Now set the initial value and compare */
WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
dummy_func_loop(10000);
stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
__GUEST_ASSERT(counter_value_post > counter_init_value, "Event update verification failed: post [%lx] pre [%lx]\n",
counter_value_post, counter_init_value);
/* The counter value is updated w.r.t relative index of cbase passed to start/stop */
WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
dummy_func_loop(10000);
udelay(msecs_to_usecs(2000)); /* irq handler should have stopped the counter */
stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]); /* The counter value after stopping should be less the init value due to overflow */
__GUEST_ASSERT(counter_value_post < counter_init_value, "counter_value_post %lx counter_init_value %lx for counter\n",
counter_value_post, counter_init_value);
stop_reset_counter(counter, 0);
}
staticvoid test_invalid_event(void)
{ struct sbiret ret; unsignedlong event = 0x1234; /* A random event */
for (i = 0; i < num_counters; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i,
0, 0, 0, 0, 0);
/* There can be gaps in logical counter indicies*/ if (ret.error) continue;
GUEST_ASSERT_NE(ret.value, 0);
ctrinfo.value = ret.value;
/** * Accessibility check of hardware and read capability of firmware counters. * The spec doesn't mandate any initial value. No need to check any value.
*/ if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW) {
pmu_csr_read_num(ctrinfo.csr);
GUEST_ASSERT(illegal_handler_invoked);
} elseif (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
read_fw_counter(i, ctrinfo);
}
}
GUEST_DONE();
}
/*
 * Guest entry point for the snapshot test: set up the snapshot shared
 * memory, verify it starts out zeroed, then run the snapshot event test
 * on the two architecturally guaranteed events.
 */
static void test_pmu_events_snaphost(void)
{
	int num_counters = 0;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	int i;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();
	snapshot_set_shmem(snapshot_gpa, 0);

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Validate shared memory access */
	GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_overflow_mask), 0);
	for (i = 0; i < num_counters; i++) {
		if (counter_mask_available & (BIT(i)))
			GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_values[i]), 0);
	}

	/* Only these two events are guaranteed to be present */
	test_pmu_event_snapshot(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event_snapshot(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}
staticvoid test_pmu_events_overflow(void)
{ int num_counters = 0, i = 0;
/* Verify presence of SBI PMU and minimum requrired SBI version */
verify_sbi_requirement_assert();
/* Get the counter details */
num_counters = get_num_counters();
update_counter_info(num_counters);
/* * Qemu supports overflow for cycle/instruction. * This test may fail on any platform that do not support overflow for these two events.
*/ for (i = 0; i < targs.overflow_irqnum; i++)
test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);
vcpu_shared_irq_count = 0;
for (i = 0; i < targs.overflow_irqnum; i++)
test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);
/* Export the shared variables to the guest */
sync_global_to_guest(vm, timer_freq);
sync_global_to_guest(vm, vcpu_shared_irq_count);
sync_global_to_guest(vm, targs);
run_vcpu(vcpu);
test_vm_destroy(vm);
}
/* Print command-line usage for the test binary. */
static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-t <test name>] [-n <number of LCOFI interrupt for overflow test>]\n",
		name);
	pr_info("\t-t: Test to run (default all). Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
	pr_info("\t-n: Number of LCOFI interrupt to trigger for each event in overflow test (default: %d)\n",
		SBI_PMU_OVERFLOW_IRQNUM_DEFAULT);
	pr_info("\t-h: print this help screen\n");
}
staticbool parse_args(int argc, char *argv[])
{ int opt; int temp_disabled_tests = SBI_PMU_TEST_BASIC | SBI_PMU_TEST_EVENTS | SBI_PMU_TEST_SNAPSHOT |
SBI_PMU_TEST_OVERFLOW; int overflow_interrupts = 0;
if (overflow_interrupts > 0) { if (targs.disabled_tests & SBI_PMU_TEST_OVERFLOW) {
pr_info("-n option is only available for overflow test\n"); goto done;
} else {
targs.overflow_irqnum = overflow_interrupts;
}
}
The information on this web page has been compiled carefully and to the best of our knowledge.
However, no guarantee is given as to the completeness, correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.