/*
 * s390 needs at least 1MB alignment, and the x86 MOVE/DELETE tests need a 2MB
 * sized and aligned region so that the initial region corresponds to exactly
 * one large page.
 */
#define MEM_REGION_SIZE		0x200000
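/*
 * Not part of the original test: a small compile-time sanity check for the
 * 2MB size assumption described above (assumes a C11 compiler).
 */
_Static_assert(!(MEM_REGION_SIZE & (0x200000 - 1)),
	       "MEM_REGION_SIZE must be a multiple of 2MB");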
#ifdef __x86_64__
/*
 * Somewhat arbitrary location and slot, intended to not overlap anything.
 */
#define MEM_REGION_GPA		0xc0000000
#define MEM_REGION_SLOT		10
/*
 * Loop until the guest is done.  Re-enter the guest on all MMIO exits, which
 * will occur if the guest attempts to access a memslot after it has been
 * deleted or while it is being moved.
 */
while (1) {
	vcpu_run(vcpu);

	if (run->exit_reason == KVM_EXIT_IO) {
		cmd = get_ucall(vcpu, &uc);
		if (cmd != UCALL_SYNC)
			break;
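		/*
		 * Hedged sketch of the rest of this loop body: on UCALL_SYNC,
		 * signal the main thread (vcpu_ready is an assumed semaphore
		 * name, see the wait_for_vcpu() sketch further below) and loop
		 * to re-enter the guest.  Any other exit, e.g. KVM_EXIT_MMIO,
		 * falls through and simply re-enters the guest as well.
		 */
		sem_post(&vcpu_ready);
	}
}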
/*
 * Allocate and map two pages so that the GPA accessed by guest_code() stays
 * valid across the memslot move.
 */
gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
/* Ditto for the host mapping so that both pages can be zeroed. */
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, 2 * 4096);
/*
 * Spin until the memory region starts getting moved to a misaligned address.
 * Every region move may or may not trigger MMIO, as the window where the
 * memslot is invalid is usually quite small.
 */
val = guest_spin_on_val(0);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL, "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);
/* Spin until the misaligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
__GUEST_ASSERT(val == 1 || val == 0, "Expected '0' or '1' (no MMIO), got '%lx'", val);
/* Spin until the memory region starts to get re-aligned. */
val = guest_spin_on_val(0);
__GUEST_ASSERT(val == 1 || val == MMIO_VAL, "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);
/* Spin until the re-aligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
GUEST_ASSERT_EQ(val, 1);
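/*
 * Hedged sketch of the guest_spin_on_val() helper used above, assuming it
 * simply polls the uint64_t that the guest and host share at MEM_REGION_GPA,
 * returns once the value changes, and syncs with the host.  The real helper
 * may differ in detail.
 */
static uint64_t guest_spin_on_val(uint64_t spin_val)
{
	uint64_t val;

	do {
		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
	} while (val == spin_val);

	GUEST_SYNC(0);
	return val;
}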
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
if (disable_slot_zap_quirk)
	vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
/*
 * Shift the region's base GPA.  The guest should not see "2" as the
 * hva->gpa translation is misaligned, i.e. the guest is accessing a
 * different host pfn.
 */
vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
WRITE_ONCE(*hva, 2);
/*
 * The guest _might_ see an invalid memslot and trigger MMIO, but it's a
 * tiny window.  Spin and defer the sync until the memslot is restored and
 * guest behavior is once again deterministic.
 */
usleep(100000);
/*
 * Note, value in memory needs to be changed *before* restoring the
 * memslot, else the guest could race the update and see "2".
 */
WRITE_ONCE(*hva, 1);
/* Restore the original base, the guest should see "1". */
vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
wait_for_vcpu();

/* Deferred sync from when the memslot was misaligned (above). */
wait_for_vcpu();
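/*
 * Hedged sketch of wait_for_vcpu(): the assumption is that the vCPU worker
 * thread posts a semaphore (vcpu_ready is an illustrative name) on every
 * UCALL_SYNC, and that the main thread waits on it with a timeout so a
 * wedged guest fails the test instead of hanging it.
 */
static void wait_for_vcpu(void)
{
	struct timespec ts;

	TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
		    "clock_gettime() failed: %d", errno);

	ts.tv_sec += 2;
	TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
		    "sem_timedwait() failed: %d", errno);
}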
/*
 * Clobber the IDT so that a #PF due to the memory region being deleted
 * escalates to triple-fault shutdown.  Because the memory region is deleted,
 * there will be no valid mappings.  As a result, KVM will repeatedly
 * intercept the stage-2 page fault that occurs when trying to vector the
 * guest's #PF.  I.e. trying to actually handle the #PF in the guest will
 * never succeed, and so isn't an option.
 */
memset(&idt, 0, sizeof(idt));
set_idt(&idt);
GUEST_SYNC(0);
/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
GUEST_ASSERT_EQ(val, MMIO_VAL);
/* Spin until the memory region is recreated. */
val = guest_spin_on_val(MMIO_VAL);
GUEST_ASSERT_EQ(val, 0);
/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
GUEST_ASSERT_EQ(val, MMIO_VAL);
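/*
 * Hedged sketch of the set_idt() helper used above, assuming it simply loads
 * the caller's (here all-zero) descriptor via LIDT so that no exception has
 * a valid gate to vector through.
 */
static inline void set_idt(const struct desc_ptr *idt)
{
	__asm__ __volatile__("lidt %0" :: "m"(*idt));
}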
vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);
if (disable_slot_zap_quirk)
	vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
/* Delete the memory region, the guest should not die. */
vm_mem_region_delete(vm, MEM_REGION_SLOT);
wait_for_vcpu();
/* Recreate the memory region. The guest should see "0". */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
MEM_REGION_GPA, MEM_REGION_SLOT,
MEM_REGION_SIZE / getpagesize(), 0);
wait_for_vcpu();
/* Delete the region again so that there's only one memslot left. */
vm_mem_region_delete(vm, MEM_REGION_SLOT);
wait_for_vcpu();
/*
 * Delete the primary memslot.  This should cause an emulation error or
 * shutdown due to the page tables getting nuked.
 */
vm_mem_region_delete(vm, 0);
/*
 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already,
 * so the instruction pointer would point to the reset vector.
 */
if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
	TEST_ASSERT(regs.rip >= final_rip_start &&
		    regs.rip < final_rip_end,
		    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx",
		    final_rip_start, final_rip_end, regs.rip);
#if defined __aarch64__ || defined __riscv || defined __x86_64__ || defined __loongarch__
supported_flags |= KVM_MEM_READONLY;
#endif
#ifdef __x86_64__
if (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))
	vm = vm_create_barebones_type(KVM_X86_SW_PROTECTED_VM);
else
#endif
	vm = vm_create_barebones();
if (kvm_check_cap(KVM_CAP_MEMORY_ATTRIBUTES) & KVM_MEMORY_ATTRIBUTE_PRIVATE)
	supported_flags |= KVM_MEM_GUEST_MEMFD;
for (i = 0; i < 32; i++) {
	if ((supported_flags & BIT(i)) && !(v2_only_flags & BIT(i)))
		continue;

	r = __vm_set_user_memory_region(vm, 0, BIT(i),
					0, MEM_REGION_SIZE, NULL);
	TEST_ASSERT(r && errno == EINVAL,
		    "KVM_SET_USER_MEMORY_REGION should have failed on v2 only flag 0x%lx", BIT(i));

	if (supported_flags & BIT(i))
		continue;

	r = __vm_set_user_memory_region2(vm, 0, BIT(i),
					 0, MEM_REGION_SIZE, NULL, 0, 0);
	TEST_ASSERT(r && errno == EINVAL,
		    "KVM_SET_USER_MEMORY_REGION2 should have failed on unsupported flag 0x%lx", BIT(i));
}
if (supported_flags & KVM_MEM_GUEST_MEMFD) {
	int guest_memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0);

	r = __vm_set_user_memory_region2(vm, 0,
					 KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_GUEST_MEMFD,
					 0, MEM_REGION_SIZE, NULL, guest_memfd, 0);
	TEST_ASSERT(r && errno == EINVAL,
		    "KVM_SET_USER_MEMORY_REGION2 should have failed, dirty logging private memory is unsupported");

	r = __vm_set_user_memory_region2(vm, 0,
					 KVM_MEM_READONLY | KVM_MEM_GUEST_MEMFD,
					 0, MEM_REGION_SIZE, NULL, guest_memfd, 0);
	TEST_ASSERT(r && errno == EINVAL,
		    "KVM_SET_USER_MEMORY_REGION2 should have failed, read-only GUEST_MEMFD memslots are unsupported");

	close(guest_memfd);
}
}
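/*
 * For reference: KVM_SET_USER_MEMORY_REGION2 takes struct
 * kvm_userspace_memory_region2, which is where the guest_memfd and offset
 * arguments above land.  The layout below is reproduced on a best-effort
 * basis; verify against <linux/kvm.h> before relying on it.
 *
 *	struct kvm_userspace_memory_region2 {
 *		__u32 slot;
 *		__u32 flags;
 *		__u64 guest_phys_addr;
 *		__u64 memory_size;
 *		__u64 userspace_addr;
 *		__u64 guest_memfd_offset;
 *		__u32 guest_memfd;
 *		__u32 pad1;
 *		__u64 pad2[14];
 *	};
 */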
/*
 * Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that
 * any attempt to add further slots fails.
 */
static void test_add_max_memory_regions(void)
{
	int ret;
	struct kvm_vm *vm;
	uint32_t max_mem_slots;
	uint32_t slot;
	void *mem, *mem_aligned, *mem_extra;
	size_t alignment;
#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif
	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	TEST_ASSERT(max_mem_slots > 0,
		    "KVM_CAP_NR_MEMSLOTS should be greater than 0");
	pr_info("Allowed number of memory slots: %i\n", max_mem_slots);

	vm = vm_create_barebones();

	/* Check that memory slots can be added up to the maximum allowed. */
	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
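	/*
	 * Hedged sketch of the slot-filling this function's comment describes:
	 * back every slot with one big anonymous mapping, add slots 0 ..
	 * max_mem_slots - 1, then verify that one more slot is rejected with
	 * EINVAL.  The exact mmap sizing and alignment handling in the real
	 * test may differ.
	 */
	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
		   PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
	mem_aligned = (void *)(((size_t)mem + alignment - 1) & ~(alignment - 1));

	for (slot = 0; slot < max_mem_slots; slot++)
		vm_set_user_memory_region(vm, slot, 0,
					  (uint64_t)slot * MEM_REGION_SIZE,
					  MEM_REGION_SIZE,
					  mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);

	/* The slot past the limit should be rejected. */
	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");

	ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
					  (uint64_t)max_mem_slots * MEM_REGION_SIZE,
					  MEM_REGION_SIZE, mem_extra);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Adding one more memory slot should fail with EINVAL");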
memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0);
for (i = 1; i < PAGE_SIZE; i++)
	test_invalid_guest_memfd(vm, memfd, i, "Unaligned offset should fail");
/*
 * Delete the first memslot, and then attempt to recreate it except with a
 * "bad" offset that results in overlap in the guest_memfd().
 */
vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
MEM_REGION_GPA, 0, NULL, -1, 0);
/* Overlap the front half of the other slot. */
r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
MEM_REGION_GPA * 2 - MEM_REGION_SIZE,
MEM_REGION_SIZE * 2,
0, memfd, 0);
TEST_ASSERT(r == -1 && errno == EEXIST, "%s", "Overlapping guest_memfd() bindings should fail with EEXIST");
/* And now the back half of the other slot. */
r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
MEM_REGION_GPA * 2 + MEM_REGION_SIZE,
MEM_REGION_SIZE * 2,
0, memfd, 0);
TEST_ASSERT(r == -1 && errno == EEXIST, "%s", "Overlapping guest_memfd() bindings should fail with EEXIST");
/* Generate a #GP by dereferencing a non-canonical address */
*((uint8_t *)NONCANONICAL) = 0x1;
GUEST_ASSERT(0);
}
/*
 * This test points the IDT descriptor base to an MMIO address.  It should
 * cause a KVM internal error when an event occurs in the guest.
 */
static void test_mmio_during_vectoring(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	u64 expected_gpa;
pr_info("Testing MMIO during vectoring error handling\n");
vm = vm_create_with_one_vcpu(&vcpu, guest_code_mmio_during_vectoring);
virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 1);
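	/*
	 * Hedged sketch of how this test is assumed to finish: the guest code
	 * (not shown) points its IDT base at the unbacked MMIO GPA and raises
	 * an exception, so running the vCPU should surface a KVM internal
	 * error rather than a normal exit.  The real test likely also checks
	 * the reported GPA against expected_gpa; that detail is omitted here.
	 */
	run = vcpu->run;

	vcpu_run(vcpu);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
		    "Expected KVM_EXIT_INTERNAL_ERROR, got: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));

	kvm_vm_free(vm);
}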
int main(int argc, char *argv[])
{
#ifdef __x86_64__
	int i, loops;
	int j, disable_slot_zap_quirk = 0;
	if (kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_SLOT_ZAP_ALL)
		disable_slot_zap_quirk = 1;

	/*
	 * FIXME: the zero-memslot test fails on aarch64 and s390x because
	 * KVM_RUN fails with ENOEXEC or EFAULT.
	 */
	test_zero_memory_regions();
	test_mmio_during_vectoring();
#endif
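	/*
	 * Hedged sketch of how the rest of main() is assumed to look: run the
	 * generic tests (the flag-validation and guest_memfd overlap tests
	 * shown above would also be invoked here), then loop the x86
	 * move/delete stress tests.  "loops" typically comes from argv, and
	 * test_move_memory_region()/test_delete_memory_region() are assumed
	 * names for the host-side tests excerpted above.
	 */
	test_add_max_memory_regions();

#ifdef __x86_64__
	if (argc > 1)
		loops = atoi_positive("Number of iterations", argv[1]);
	else
		loops = 10;

	for (i = 0; i < loops; i++)
		for (j = 0; j <= disable_slot_zap_quirk; j++)
			test_move_memory_region(!!j);

	for (i = 0; i < loops; i++)
		for (j = 0; j <= disable_slot_zap_quirk; j++)
			test_delete_memory_region(!!j);
#endif

	return 0;
}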