/* KVM should return supported EVMCS version range */
TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
(evmcs_ver & 0xff) > 0, "Incorrect EVMCS version range: %x:%x",
evmcs_ver & 0xff, evmcs_ver >> 8);
return evmcs_ver;
}
/*
 * Allocate memory regions for nested VMX tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
 *
 * Return:
 *   Pointer to structure with the addresses of the VMX areas.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
	vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

	/* Setup of a region of guest memory for the vmxon region. */
	vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

	/* Setup of a region of guest memory for a vmcs. */
	vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

	/* Setup of a region of guest memory for the MSR bitmap. */
	vmx->msr = (void *)vm_vaddr_alloc_page(vm);
	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
	memset(vmx->msr_hva, 0, getpagesize());

	/* Setup of a region of guest memory for the shadow VMCS. */
	vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
	vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
	vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);

	/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
	vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
	vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
	vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
	memset(vmx->vmread_hva, 0, getpagesize());

	/*
	 * NOTE(review): the source was truncated here; the VMWRITE bitmap
	 * must be allocated and zeroed just like the VMREAD bitmap, and the
	 * output argument must be filled in before returning.
	 */
	vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
	vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
	memset(vmx->vmwrite_hva, 0, getpagesize());

	*p_vmx_gva = vmx_gva;
	return vmx;
}
/* * Ensure bits in CR0 and CR4 are valid in VMX operation: * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx. * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
*/
__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");
/* * Configure IA32_FEATURE_CONTROL MSR to allow VMXON: * Bit 0: Lock bit. If clear, VMXON causes a #GP. * Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON * outside of SMX causes a #GP.
*/
required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
required |= FEAT_CTL_LOCKED;
feature_control = rdmsr(MSR_IA32_FEAT_CTL); if ((feature_control & required) != required)
wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);
/* Enter VMX root operation. */
*(uint32_t *)(vmx->vmxon) = vmcs_revision(); if (vmxon(vmx->vmxon_gpa)) returnfalse;
returntrue;
}
/*
 * Make vmx->vmcs the current VMCS: initialize its revision ID, VMCLEAR it,
 * then VMPTRLD it. Also initializes the shadow VMCS (revision ID with the
 * shadow-VMCS indicator bit 31 set) without loading it.
 *
 * Return: true on success, false if any VMX instruction fails.
 */
bool load_vmcs(struct vmx_pages *vmx)
{
	/* Load a VMCS. */
	*(uint32_t *)(vmx->vmcs) = vmcs_revision();
	if (vmclear(vmx->vmcs_gpa))
		return false;

	if (vmptrld(vmx->vmcs_gpa))
		return false;

	/* Setup shadow VMCS, do not load it yet. */
	*(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
	if (vmclear(vmx->shadow_vmcs_gpa))
		return false;

	return true;
}
/* * Initialize the control fields to the most basic settings possible.
*/ staticinlinevoid init_vmcs_control_fields(struct vmx_pages *vmx)
{
uint32_t sec_exec_ctl = 0;
/* * Initialize the host state fields based on the current host state, with * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch * or vmresume.
*/ staticinlinevoid init_vmcs_host_state(void)
{
uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);
/* * Initialize the guest state fields essentially as a clone of * the host state fields. Some host state fields have fixed * values, and we set the corresponding guest state fields accordingly.
*/ staticinlinevoid init_vmcs_guest_state(void *rip, void *rsp)
{
vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
vmwrite(GUEST_LDTR_SELECTOR, 0);
vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
vmwrite(GUEST_INTR_STATUS, 0);
vmwrite(GUEST_PML_INDEX, 0);
/* * For now mark these as accessed and dirty because the only * testcase we have needs that. Can be reconsidered later.
*/
pte->accessed = true;
pte->dirty = true;
/*
 * Map a range of EPT guest physical addresses to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   nested_paddr - Nested guest physical address to map
 *   paddr - VM Physical Address
 *   size - The size of the range to map
 *   level - The level at which to map the range
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a nested guest translation for the
 * page range starting at nested_paddr to the page range starting at paddr.
 */
void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		  uint64_t nested_paddr, uint64_t paddr, uint64_t size,
		  int level)
{
	size_t page_size = PG_LEVEL_SIZE(level);
	size_t npages = size / page_size;

	/*
	 * NOTE(review): the body was truncated after the npages computation;
	 * reconstructed: guard against address-space wraparound, then map
	 * each page at the requested level.
	 */
	TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr space limit exceeded");
	TEST_ASSERT(paddr + size > paddr, "Paddr space limit exceeded");

	while (npages--) {
		__nested_pg_map(vmx, vm, nested_paddr, paddr, level);
		nested_paddr += page_size;
		paddr += page_size;
	}
}
/*
 * Prepare an identity extended page table that maps all the
 * physical pages in VM (i.e. every page of the memslot that the VM has
 * already allocated — the clear bits of unused_phy_pages).
 */
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
			uint32_t memslot)
{
	sparsebit_idx_t i, last;
	struct userspace_mem_region *region =
		memslot2region(vm, memslot);

	i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
	last = i + (region->region.memory_size >> vm->page_shift);

	/*
	 * Walk the in-use pages of the memslot (clear bits in
	 * unused_phy_pages) and identity-map each one.
	 * NOTE(review): the loop body was truncated in the source after the
	 * break; the nested_map() call is reconstructed.
	 */
	for (;;) {
		i = sparsebit_next_clear(region->unused_phy_pages, i);
		if (i > last)
			break;

		nested_map(vmx, vm,
			   (uint64_t)i << vm->page_shift,
			   (uint64_t)i << vm->page_shift,
			   1 << vm->page_shift);
	}
}
/*
 * NOTE(review): the following text is not C code; it is a German website
 * disclaimer that was evidently appended by a broken extraction step.
 * Preserved here (translated to English) as a comment so the file compiles:
 * "The information on this website was compiled carefully to the best of
 * our knowledge. However, neither completeness, nor correctness, nor the
 * quality of the information provided is guaranteed.
 * Note: The colored syntax highlighting and the measurement are still
 * experimental."
 */