/* Trigger an interrupt injection */
GUEST_SYNC(TEST_INJECT_VECTOR);
guest_wait_for_irq();

/* Test having the host set runstates manually */
GUEST_SYNC(TEST_RUNSTATE_runnable);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
GUEST_ASSERT(rs->state == 0);

/* Test runstate time adjust */
GUEST_SYNC(TEST_RUNSTATE_ADJUST);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x5a);
GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x6b6b);

/* Test runstate time set */
GUEST_SYNC(TEST_RUNSTATE_DATA);
GUEST_ASSERT(rs->state_entry_time >= 0x8000);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] == 0);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x6b6b);
GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x5a);

/* sched_yield() should result in some 'runnable' time */
GUEST_SYNC(TEST_STEAL_TIME);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME);

/* Attempt to deliver a *masked* interrupt */
GUEST_SYNC(TEST_EVTCHN_MASKED);

/* Wait until we see the bit set */
struct shared_info *si = (void *)SHINFO_VADDR;
while (!si->evtchn_pending[0])
	__asm__ __volatile__ ("rep nop" : : : "memory");

/* Now deliver an *unmasked* interrupt */
GUEST_SYNC(TEST_EVTCHN_UNMASKED);
guest_wait_for_irq();

/* Change memslots and deliver an interrupt */
GUEST_SYNC(TEST_EVTCHN_SLOWPATH);
guest_wait_for_irq();

/* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND */
GUEST_SYNC(TEST_EVTCHN_SEND_IOCTL);

/*
 * Same again, but this time the host has messed with memslots so it
 * should take the slow path in kvm_xen_set_evtchn().
 *
 * NOTE(review): `s` (an evtchn_send argument) is set up in code outside
 * this view — verify against the complete file.
 */
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();

GUEST_SYNC(TEST_EVTCHN_HCALL_EVENTFD);

/*
 * Deliver "outbound" event channel to an eventfd which
 * happens to be one of our own irqfds.
 */
s.port = 197;
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();
GUEST_SYNC(TEST_TIMER_SETUP);

/* Set a timer 100ms in the future. */
xen_hypercall(__HYPERVISOR_set_timer_op,
	      rs->state_entry_time + 100000000, NULL);

GUEST_SYNC(TEST_TIMER_WAIT);
/* Now wait for the timer */
guest_wait_for_irq();

GUEST_SYNC(TEST_TIMER_RESTORE);
/* The host has 'restored' the timer. Just wait for it. */
guest_wait_for_irq();

GUEST_SYNC(TEST_POLL_READY);

/* Poll for an event channel port which is already set */
u32 ports[1] = { EVTCHN_TIMER };
struct sched_poll p = {
	.ports = ports,
	.nr_ports = 1,
	.timeout = 0,
};

/* Poll for an unset port and wait for the timeout. */
p.timeout = 100000000;
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

GUEST_SYNC(TEST_POLL_MASKED);
/* A timer will wake the masked port we're waiting on, while we poll */
p.timeout = 0;
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

GUEST_SYNC(TEST_POLL_WAKE);

/*
 * Set the vcpu_info to point at exactly the place it already is to
 * make sure the attribute is functional.
 */
GUEST_SYNC(SET_VCPU_INFO);

/*
 * A timer wake an *unmasked* port which should wake us with an
 * actual interrupt, while we're polling on a different port.
 */
ports[0]++;
p.timeout = 0;
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
guest_wait_for_irq();

GUEST_SYNC(TEST_TIMER_PAST);
/* Timer should have fired already */
guest_wait_for_irq();

GUEST_SYNC(TEST_LOCKING_SEND_RACE);	/* Racing host ioctls */
guest_wait_for_irq();

GUEST_SYNC(TEST_LOCKING_POLL_RACE);	/* Racing vmcall against host ioctl */

wait_for_timer:
/*
 * Poll for a timer wake event while the worker thread is mucking with
 * the shared info. KVM XEN drops timer IRQs if the shared info is
 * invalid when the timer expires. Arbitrarily poll 100 times before
 * giving up and asking the VMM to re-arm the timer. 100 polls should
 * consume enough time to beat on KVM without taking too long if the
 * timer IRQ is dropped due to an invalid event channel.
 *
 * NOTE(review): the loop counter `i` is declared outside this view.
 */
for (i = 0; i < 100 && !guest_saw_irq; i++)
	__xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);

/*
 * Re-send the timer IRQ if it was (likely) dropped due to the timer
 * expiring while the event channel was invalid.
 */
if (!guest_saw_irq) {
	GUEST_SYNC(TEST_LOCKING_POLL_TIMEOUT);
	goto wait_for_timer;
}

guest_saw_irq = false;
/* Map a region for the shared_info page */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
			    SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0);
virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3);
shinfo = addr_gpa2hva(vm, SHINFO_VADDR);

int zero_fd = open("/dev/zero", O_RDONLY);
TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");

/*
 * Let the kernel know that we *will* use it for sending all
 * event channels, which lets it intercept SCHEDOP_poll
 */
if (do_evtchn_tests)
	hvmc.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND;

/*
 * Test what happens when the HVA of the shinfo page is remapped after
 * the kernel has a reference to it. But make sure we copy the clock
 * info over since that's only set at setup time, and we test it later.
 */
struct pvclock_wall_clock wc_copy = shinfo->wc;
void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0);
TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info");
shinfo->wc = wc_copy;

/*
 * Test migration to a different vCPU
 *
 * NOTE(review): `inj` is declared and partially initialized in code
 * outside this view — verify against the complete file.
 */
inj.u.evtchn.flags = KVM_XEN_EVTCHN_UPDATE;
inj.u.evtchn.deliver.port.vcpu = vcpu->id;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
	REPORT_GUEST_ASSERT(uc);
	/* NOT REACHED */
case UCALL_SYNC: {
	struct kvm_xen_vcpu_attr rst;
	long rundelay;

	/* The four runstate times must always sum to state_entry_time */
	if (do_runstate_tests)
		TEST_ASSERT(rs->state_entry_time == rs->time[0] +
			    rs->time[1] + rs->time[2] + rs->time[3],
			    "runstate times don't add up");

	switch (uc.args[1]) {
	case TEST_INJECT_VECTOR:
		if (verbose)
			printf("Delivering evtchn upcall\n");
		evtchn_irq_expected = true;
		vinfo->evtchn_upcall_pending = 1;
		break;

	case TEST_RUNSTATE_runnable...TEST_RUNSTATE_offline:
		TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
		if (!do_runstate_tests)
			goto done;
		if (verbose)
			printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
		rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
		rst.u.runstate.state = uc.args[1] + RUNSTATE_runnable -
			TEST_RUNSTATE_runnable;
		vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
		break;

	case TEST_STEAL_TIME:
		if (verbose)
			printf("Testing steal time\n");
		/* Yield until scheduler delay exceeds target */
		rundelay = get_run_delay() + MIN_STEAL_TIME;
		do {
			sched_yield();
		} while (get_run_delay() < rundelay);
		break;

	case TEST_EVTCHN_MASKED:
		if (!do_eventfd_tests)
			goto done;
		if (verbose)
			printf("Testing masked event channel\n");
		shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1;
		eventfd_write(irq_fd[0], 1UL);
		alarm(1);
		break;

	case TEST_EVTCHN_UNMASKED:
		if (verbose)
			printf("Testing unmasked event channel\n");
		/* Unmask that, but deliver the other one */
		shinfo->evtchn_pending[0] = 0;
		shinfo->evtchn_mask[0] = 0;
		eventfd_write(irq_fd[1], 1UL);
		evtchn_irq_expected = true;
		alarm(1);
		break;

	case TEST_EVTCHN_SLOWPATH:
		TEST_ASSERT(!evtchn_irq_expected,
			    "Expected event channel IRQ but it didn't happen");
		shinfo->evtchn_pending[1] = 0;
		if (verbose)
			printf("Testing event channel after memslot change\n");
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    DUMMY_REGION_GPA, DUMMY_REGION_SLOT, 1, 0);
		eventfd_write(irq_fd[0], 1UL);
		evtchn_irq_expected = true;
		alarm(1);
		break;

	case TEST_EVTCHN_SEND_IOCTL:
		TEST_ASSERT(!evtchn_irq_expected,
			    "Expected event channel IRQ but it didn't happen");
		if (!do_evtchn_tests)
			goto done;
		shinfo->evtchn_pending[0] = 0;
		if (verbose)
			printf("Testing injection with KVM_XEN_HVM_EVTCHN_SEND\n");
		/*
		 * Read it back and check the pending time is reported correctly
		 *
		 * NOTE(review): this timer read-back looks out of place under an
		 * event-channel case and `tmr` is declared outside this view —
		 * lines appear to be missing here (likely the TEST_TIMER_SETUP
		 * handling); verify against the complete file.
		 */
		tmr.u.timer.expires_ns = 0;
		vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
		TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000,
			    "Timer not reported pending");
		alarm(1);
		break;

	case TEST_TIMER_PAST:
		TEST_ASSERT(!evtchn_irq_expected,
			    "Expected event channel IRQ but it didn't happen");
		/* Read timer and check it is no longer pending */
		vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
		TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending");
		shinfo->evtchn_pending[0] = 0;
		if (verbose)
			printf("Testing timer in the past\n");
		/*
		 * NOTE(review): no `break` here, so this case falls through to
		 * TEST_LOCKING_POLL_TIMEOUT — lines appear to be missing from
		 * this view; confirm against the complete file before assuming
		 * the fallthrough is intentional.
		 */
	case TEST_LOCKING_POLL_TIMEOUT:
		/*
		 * Optional and possibly repeated sync point.
		 * Injecting the timer IRQ may fail if the
		 * shinfo is invalid when the timer expires.
		 * If the timer has expired but the IRQ hasn't
		 * been delivered, rearm the timer and retry.
		 */
		vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);

		/* Resume the guest if the timer is still pending. */
		if (tmr.u.timer.expires_ns)
			break;

		/* All done if the IRQ was delivered. */
		if (!evtchn_irq_expected)
			break;

		tmr.u.timer.expires_ns = rs->state_entry_time +
			SHINFO_RACE_TIMEOUT * 1000000000ULL;
		vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
		break;

	case TEST_DONE:
		TEST_ASSERT(!evtchn_irq_expected,
			    "Expected event channel IRQ but it didn't happen");
		ret = pthread_cancel(thread);
		TEST_ASSERT(ret == 0, "pthread_cancel() failed: %s", strerror(ret));
/*
 * Just a *really* basic check that things are being put in the
 * right place. The actual calculations are much the same for
 * Xen as they are for the KVM variants, so no need to check.
 */
struct pvclock_wall_clock *wc;
struct pvclock_vcpu_time_info *ti, *ti2;
struct kvm_clock_data kcdata;
long long delta;	/* fixed: was the invalid fused token "longlong" */

/*
 * KVM_GET_CLOCK gives CLOCK_REALTIME which jumps on leap seconds updates but
 * unfortunately KVM doesn't currently offer a CLOCK_TAI alternative. Accept 1s
 * delta as testing clock accuracy is not the goal here. The test just needs to
 * check that the value in shinfo is somewhat sane.
 *
 * NOTE(review): wc/ti/ti2/kcdata are never assigned and `delta` is never
 * computed in the visible portion of this file, and the "} else {" below has
 * no visible matching "if" — lines appear to be missing; verify against the
 * complete source.
 */
TEST_ASSERT(llabs(delta) < NSEC_PER_SEC,
	    "Guest's epoch from shinfo %d.%09d differs from KVM_GET_CLOCK %lld.%lld",
	    wc->sec, wc->nsec, (kcdata.realtime - kcdata.clock) / NSEC_PER_SEC,
	    (kcdata.realtime - kcdata.clock) % NSEC_PER_SEC);
} else {
	pr_info("Missing KVM_CLOCK_REALTIME, skipping shinfo epoch sanity check\n");
}

/* Published time_info versions must be non-zero and even (no update in flight) */
TEST_ASSERT(ti->version && !(ti->version & 1),
	    "Bad time_info version %x", ti->version);
/* Fixed: this assert checks ti2 but previously printed ti->version */
TEST_ASSERT(ti2->version && !(ti2->version & 1),
	    "Bad time_info version %x", ti2->version);

if (do_runstate_tests) {
	/*
	 * Fetch runstate and check sanity. Strictly speaking in the
	 * general case we might not expect the numbers to be identical
	 * but in this case we know we aren't running the vCPU any more.
	 */
	struct kvm_xen_vcpu_attr rst = {
		.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA,
	};
	vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst);

	if (verbose) {
		printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
		       rs->state <= RUNSTATE_offline ? runstate_names[rs->state] : "unknown",
		       rs->state, rs->state_entry_time);
		for (int i = RUNSTATE_running; i <= RUNSTATE_offline; i++) {
			printf("State %s: %" PRIu64 " ns\n",
			       runstate_names[i], rs->time[i]);
		}
	}

/*
 * Exercise runstate info at all points across the page boundary, in
 * 32-bit and 64-bit mode. In particular, test the case where it is
 * configured in 32-bit mode and then switched to 64-bit mode while
 * active, which takes it onto the second page.
 */
unsigned long runstate_addr;	/* fixed: was the invalid fused token "unsignedlong" */
struct compat_vcpu_runstate_info *crs;
for (runstate_addr = SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE - sizeof(*rs) - 4;
     runstate_addr < SHINFO_REGION_GPA + PAGE_SIZE + PAGE_SIZE + 4;
     runstate_addr++) {
/*
 * NOTE(review): the following text is website-disclaimer boilerplate (in
 * German) that leaked into this file during extraction; it is not part of
 * the test source. Translated to English:
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */