/* Process a latched INIT or SMI, if any. */
kvm_make_request(KVM_REQ_EVENT, vcpu);
/* * Even if KVM_SET_SREGS2 loaded PDPTRs out of band, * on SMM exit we still need to reload them from * guest memory
*/
vcpu->arch.pdptrs_from_userspace = false;
}
/* * Give enter_smm() a chance to make ISA-specific changes to the vCPU * state (e.g. leave guest mode) after we've saved the state into the * SMM state-save area. * * Kill the VM in the unlikely case of failure, because the VM * can be in undefined state in this case.
*/ if (kvm_x86_call(enter_smm)(vcpu, &smram)) goto error;
kvm_smm_changed(vcpu, true);
if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram))) goto error;
if (kvm_x86_call(get_nmi_mask)(vcpu))
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; else
kvm_x86_call(set_nmi_mask)(vcpu, true);
/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
pcid = 0; if (cr4 & X86_CR4_PCIDE) {
pcid = cr3 & 0xfff;
cr3 &= ~0xfff;
}
bad = kvm_set_cr3(vcpu, cr3); if (bad) return X86EMUL_UNHANDLEABLE;
/* * First enable PAE, long mode needs it before CR0.PG = 1 is set. * Then enable protected mode. However, PCID cannot be enabled * if EFER.LMA=0, so set it separately.
*/
bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE); if (bad) return X86EMUL_UNHANDLEABLE;
bad = kvm_set_cr0(vcpu, cr0); if (bad) return X86EMUL_UNHANDLEABLE;
if (cr4 & X86_CR4_PCIDE) {
bad = kvm_set_cr4(vcpu, cr4); if (bad) return X86EMUL_UNHANDLEABLE; if (pcid) {
bad = kvm_set_cr3(vcpu, cr3 | pcid); if (bad) return X86EMUL_UNHANDLEABLE;
}
}
return X86EMUL_CONTINUE;
}
staticint rsm_load_state_32(struct x86_emulate_ctxt *ctxt, conststruct kvm_smram_state_32 *smstate)
{ struct kvm_vcpu *vcpu = ctxt->vcpu; struct desc_ptr dt; int i, r;
int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{ struct kvm_vcpu *vcpu = ctxt->vcpu; unsignedlong cr0; union kvm_smram smram;
u64 smbase; int ret;
smbase = vcpu->arch.smbase;
ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram)); if (ret < 0) return X86EMUL_UNHANDLEABLE;
if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
kvm_x86_call(set_nmi_mask)(vcpu, false);
kvm_smm_changed(vcpu, false);
/* * Get back to real mode, to prepare a safe state in which to load * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU * supports long mode.
*/ #ifdef CONFIG_X86_64 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) { struct kvm_segment cs_desc; unsignedlong cr4;
/* Zero CR4.PCIDE before CR0.PG. */
cr4 = kvm_read_cr4(vcpu); if (cr4 & X86_CR4_PCIDE)
kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
/* A 32-bit code segment is required to clear EFER.LMA. */
memset(&cs_desc, 0, sizeof(cs_desc));
cs_desc.type = 0xb;
cs_desc.s = cs_desc.g = cs_desc.present = 1;
kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
} #endif
/* For the 64-bit case, this will clear EFER.LMA. */
cr0 = kvm_read_cr0(vcpu); if (cr0 & X86_CR0_PE)
kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
#ifdef CONFIG_X86_64 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) { unsignedlong cr4, efer;
/* Clear CR4.PAE before clearing EFER.LME. */
cr4 = kvm_read_cr4(vcpu); if (cr4 & X86_CR4_PAE)
kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);
/* And finally go back to 32-bit mode. */
efer = 0;
kvm_set_msr(vcpu, MSR_EFER, efer);
} #endif
/* * FIXME: When resuming L2 (a.k.a. guest mode), the transition to guest * mode should happen _after_ loading state from SMRAM. However, KVM * piggybacks the nested VM-Enter flows (which is wrong for many other * reasons), and so nSVM/nVMX would clobber state that is loaded from * SMRAM and from the VMCS/VMCB.
*/ if (kvm_x86_call(leave_smm)(vcpu, &smram)) return X86EMUL_UNHANDLEABLE;
#ifdef CONFIG_X86_64 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
ret = rsm_load_state_64(ctxt, &smram.smram64); else #endif
ret = rsm_load_state_32(ctxt, &smram.smram32);
/* * If RSM fails and triggers shutdown, architecturally the shutdown * occurs *before* the transition to guest mode. But due to KVM's * flawed handling of RSM to L2 (see above), the vCPU may already be * in_guest_mode(). Force the vCPU out of guest mode before delivering * the shutdown, so that L1 enters shutdown instead of seeing a VM-Exit * that architecturally shouldn't be possible.
*/ if (ret != X86EMUL_CONTINUE && is_guest_mode(vcpu))
kvm_leave_nested(vcpu); return ret;
}
/*
 * NOTE(review): the text below is residue from a web-based code viewer
 * ("Messung V0.5" footer, originally in German), not part of the kernel
 * source.  Wrapped in a comment so the file remains compilable; safe to
 * delete entirely.
 *
 * Measurement V0.5
 *   Processing duration: 0.1 seconds (preprocessed)
 *   The information on this web page was compiled carefully and to the
 *   best of our knowledge.  However, no guarantee is given as to the
 *   completeness, correctness, or quality of the information provided.
 *   Remark: the syntax highlighting and the measurement are still
 *   experimental.
 */