static void vt_disable_virtualization_cpu(void)
{
	/* Note, TDX *and* VMX need to be disabled if TDX is enabled. */
	if (enable_tdx)
		tdx_disable_virtualization_cpu();
	vmx_disable_virtualization_cpu();
}
static __init int vt_hardware_setup(void)
{
	int ret = vmx_hardware_setup();

	if (ret)
		return ret;

	/* TDX setup is attempted only when the module parameter enables it. */
	if (enable_tdx)
		tdx_hardware_setup();

	return 0;
}
static int vt_vm_init(struct kvm *kvm)
{
	/* Dispatch VM init to the TDX or plain-VMX implementation. */
	if (is_td(kvm))
		return tdx_vm_init(kvm);

	return vmx_vm_init(kvm);
}
static void vt_vm_pre_destroy(struct kvm *kvm)
{
	/* Only TDX VMs have pre-destroy work: releasing the HKID. */
	if (is_td(kvm))
		tdx_mmu_release_hkid(kvm);
}
static void vt_vm_destroy(struct kvm *kvm)
{
	if (is_td(kvm)) {
		tdx_vm_destroy(kvm);
		return;
	}

	vmx_vm_destroy(kvm);
}
static int vt_vcpu_precreate(struct kvm *kvm)
{
	/* No precreate work is needed for TDX vCPUs. */
	return is_td(kvm) ? 0 : vmx_vcpu_precreate(kvm);
}
static int vt_vcpu_create(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu))
		return tdx_vcpu_create(vcpu);

	/*
	 * NOTE(review): the VMX fallback and closing brace were truncated from
	 * this function; restored to match the is_td_vcpu() dispatch pattern
	 * used by every other wrapper in this file — confirm against upstream.
	 */
	return vmx_vcpu_create(vcpu);
}
static void vt_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Forward the load to whichever backend owns this vCPU. */
	if (is_td_vcpu(vcpu))
		tdx_vcpu_load(vcpu, cpu);
	else
		vmx_vcpu_load(vcpu, cpu);
}
static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
{
	/*
	 * Basic TDX does not support feature PML. KVM does not enable PML in
	 * TD's VMCS, nor does it allocate or flush PML buffer for TDX.
	 */
	if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
		return;

	/*
	 * NOTE(review): the VMX forward and closing brace were truncated from
	 * this function; restored per this file's dispatch pattern.
	 */
	vmx_update_cpu_dirty_logging(vcpu);
}
/* * The kvm parameter can be NULL (module initialization, or invocation before * VM creation). Be sure to check the kvm parameter before using it.
*/ staticbool vt_has_emulated_msr(struct kvm *kvm, u32 index)
{ if (kvm && is_td(kvm)) return tdx_has_emulated_msr(index);
static void vt_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
	/*
	 * TDX doesn't allow VMM to configure interception of MSR accesses.
	 * TDX guest requests MSR accesses by calling TDVMCALL. The MSR
	 * filters will be applied when handling the TDVMCALL for RDMSR/WRMSR
	 * if the userspace has set any.
	 */
	if (is_td_vcpu(vcpu))
		return;

	vmx_recalc_msr_intercepts(vcpu);
}
static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
	if (is_td_vcpu(vcpu))
		return tdx_complete_emulated_msr(vcpu, err);

	/*
	 * NOTE(review): the non-TDX fallback and closing brace were truncated
	 * from this function. Upstream dispatches to kvm_complete_insn_gp()
	 * here (VMX's .complete_emulated_msr) — confirm against the tree this
	 * file belongs to.
	 */
	return kvm_complete_insn_gp(vcpu, err);
}
staticvoid vt_enable_smi_window(struct kvm_vcpu *vcpu)
{ if (KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm)) return;
/* RSM will cause a vmexit anyway. */
vmx_enable_smi_window(vcpu);
} #endif
static int vt_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
					void *insn, int insn_len)
{
	/*
	 * For TDX, this can only be triggered for MMIO emulation. Let the
	 * guest retry after installing the SPTE with suppress #VE bit cleared,
	 * so that the guest will receive #VE when retry. The guest is expected
	 * to call TDG.VP.VMCALL<MMIO> to request VMM to do MMIO emulation on
	 * #VE.
	 */
	if (is_td_vcpu(vcpu))
		return X86EMUL_RETRY_INSTR;

	/*
	 * NOTE(review): the VMX fallback and closing brace were truncated from
	 * this function; restored per this file's dispatch pattern.
	 */
	return vmx_check_emulate_instruction(vcpu, emul_type, insn, insn_len);
}
staticbool vt_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{ /* * INIT and SIPI are always blocked for TDX, i.e., INIT handling and * the OP vcpu_deliver_sipi_vector() won't be called.
*/ if (is_td_vcpu(vcpu)) returntrue;
return vmx_apic_init_signal_blocked(vcpu);
}
static void vt_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{
	/* Only x2APIC mode is supported for TD. */
	if (is_td_vcpu(vcpu))
		return;

	vmx_set_virtual_apic_mode(vcpu);
}
static void vt_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
	/* No-op for TDX vCPUs; only forwarded for plain VMX. */
	if (is_td_vcpu(vcpu))
		return;

	vmx_hwapic_isr_update(vcpu, max_isr);
}
static int vt_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
	/* TDX vCPUs report -1; VMX performs the actual PIR->IRR sync. */
	return is_td_vcpu(vcpu) ? -1 : vmx_sync_pir_to_irr(vcpu);
}
static void vt_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
				 int trig_mode, int vector)
{
	if (is_td_vcpu(apic->vcpu)) {
		tdx_deliver_interrupt(apic, delivery_mode, trig_mode, vector);
		return;
	}

	/*
	 * NOTE(review): the VMX forward and closing brace were truncated from
	 * this function; restored per this file's dispatch pattern.
	 */
	vmx_deliver_interrupt(apic, delivery_mode, trig_mode, vector);
}
static void vt_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
	/* DR7 is not writable by KVM for TDX vCPUs; silently ignore. */
	if (is_td_vcpu(vcpu))
		return;

	vmx_set_dr7(vcpu, val);
}
static void vt_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * MOV-DR exiting is always cleared for TD guest, even in debug mode.
	 * Thus KVM_DEBUGREG_WONT_EXIT can never be set and it should never
	 * reach here for TD vcpu.
	 */
	if (is_td_vcpu(vcpu))
		return;

	/*
	 * NOTE(review): the VMX forward and closing brace were truncated from
	 * this function; restored per this file's dispatch pattern.
	 */
	vmx_sync_dirty_debug_regs(vcpu);
}
static int vt_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	/*
	 * The TDX module manages NMI windows and NMI reinjection, and hides NMI
	 * blocking, all KVM can do is throw an NMI over the wall.
	 */
	if (is_td_vcpu(vcpu))
		return true;

	return vmx_nmi_allowed(vcpu, for_injection);
}
staticbool vt_get_nmi_mask(struct kvm_vcpu *vcpu)
{ /* * KVM can't get NMI blocking status for TDX guest, assume NMIs are * always unmasked.
*/ if (is_td_vcpu(vcpu)) returnfalse;
return vmx_get_nmi_mask(vcpu);
}
static void vt_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	/* NMI masking is not KVM-controllable for TDX vCPUs. */
	if (is_td_vcpu(vcpu))
		return;

	vmx_set_nmi_mask(vcpu, masked);
}
static void vt_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	/* Refer to the comments in tdx_inject_nmi(). */
	if (is_td_vcpu(vcpu))
		return;

	vmx_enable_nmi_window(vcpu);
}
static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			    int pgd_level)
{
	/* Route the root-PGD load to the backend that owns this vCPU. */
	if (is_td_vcpu(vcpu))
		tdx_load_mmu_pgd(vcpu, root_hpa, pgd_level);
	else
		vmx_load_mmu_pgd(vcpu, root_hpa, pgd_level);
}
static void vt_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	/* Interrupt shadow is not KVM-controllable for TDX vCPUs. */
	if (is_td_vcpu(vcpu))
		return;

	vmx_set_interrupt_shadow(vcpu, mask);
}
static u32 vt_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	/* TDX vCPUs always report no interrupt shadow. */
	return is_td_vcpu(vcpu) ? 0 : vmx_get_interrupt_shadow(vcpu);
}
static void vt_patch_hypercall(struct kvm_vcpu *vcpu,
			       unsigned char *hypercall)
{
	/*
	 * Because guest memory is protected, guest can't be patched. TD kernel
	 * is modified to use TDG.VP.VMCALL for hypercall.
	 */
	if (is_td_vcpu(vcpu))
		return;

	vmx_patch_hypercall(vcpu, hypercall);
}
static void vt_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
{
	/* Direct IRQ injection is not done by KVM for TDX vCPUs. */
	if (is_td_vcpu(vcpu))
		return;

	vmx_inject_irq(vcpu, reinjected);
}
static void vt_inject_exception(struct kvm_vcpu *vcpu)
{
	/* Exception injection is not done by KVM for TDX vCPUs. */
	if (is_td_vcpu(vcpu))
		return;

	vmx_inject_exception(vcpu);
}
static void vt_cancel_injection(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu))
		return;

	/*
	 * NOTE(review): the VMX forward and closing brace were truncated from
	 * this function; restored per this file's dispatch pattern.
	 */
	vmx_cancel_injection(vcpu);
}
static int __init vt_init(void)
{
	unsigned vcpu_size, vcpu_align;
	int r;

	r = vmx_init();
	if (r)
		return r;

	/* tdx_init() has been taken */
	r = tdx_bringup();
	if (r)
		goto err_tdx_bringup;

	/*
	 * TDX and VMX have different vCPU structures. Calculate the
	 * maximum size/align so that kvm_init() can use the larger
	 * values to create the kmem_vcpu_cache.
	 */
	vcpu_size = sizeof(struct vcpu_vmx);
	vcpu_align = __alignof__(struct vcpu_vmx);
	if (enable_tdx) {
		vcpu_size = max_t(unsigned, vcpu_size, sizeof(struct vcpu_tdx));
		vcpu_align = max_t(unsigned, vcpu_align,
				   __alignof__(struct vcpu_tdx));
		kvm_caps.supported_vm_types |= BIT(KVM_X86_TDX_VM);
	}

	/*
	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
	 * exposed to userspace!
	 */
	r = kvm_init(vcpu_size, vcpu_align, THIS_MODULE);
	if (r)
		goto err_kvm_init;

	return 0;

	/*
	 * NOTE(review): the error-unwind tail below was truncated from the
	 * source. Restored to undo tdx_bringup() and vmx_init() in reverse
	 * order, matching the goto labels referenced above — confirm the
	 * cleanup callees against upstream.
	 */
err_kvm_init:
	tdx_cleanup();
err_tdx_bringup:
	vmx_exit();
	return r;
}
/*
 * NOTE(review): the text below is a German website disclaimer that appears
 * to be a scraping/paste artifact, not part of this kernel source. It is
 * preserved here inside a comment so it no longer breaks compilation.
 * Translation: "The information on this website was carefully compiled to
 * the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Remark: the
 * colored syntax display and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */