static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
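
/*
 * Example: rsvd_bits(8, 11) == 0xf00ULL, i.e. bits 8..11 set.  Note the
 * (2ULL << (e - s)) - 1 form, rather than (1ULL << (e - s + 1)) - 1:
 * it avoids an undefined shift by 64 when the mask spans all 64 bits
 * (s == 0, e == 63).
 */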
static inline gfn_t kvm_mmu_max_gfn(void)
{
	/*
	 * Note that this uses the host MAXPHYADDR, not the guest's.
	 * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
	 * assuming KVM is running on bare metal, guest accesses beyond
	 * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
	 * (either EPT Violation/Misconfig or #NPF), and so KVM will never
	 * install a SPTE for such addresses.  If KVM is running as a VM
	 * itself, on the other hand, it might see a MAXPHYADDR that is less
	 * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
	 * disallows such SPTEs entirely and simplifies the TDP MMU.
	 */
	int max_gpa_bits = likely(tdp_enabled) ? kvm_host.maxphyaddr : 52;

	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}
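
/*
 * Example: with TDP enabled on a host with MAXPHYADDR == 46 and 4 KiB
 * pages (PAGE_SHIFT == 12), the maximum mappable GFN is
 * (1ULL << 34) - 1.
 */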
static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
						    struct kvm_mmu *mmu)
{
	/*
	 * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
	 * @mmu's snapshot of CR0.WP and thus all related paging metadata may
	 * be stale.  Refresh CR0.WP and the metadata on-demand when checking
	 * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
	 * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
	 * need to refresh nested_mmu, a.k.a. the walker used to translate L2
	 * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
	 */
	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
		return;

	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}
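
/*
 * Note: the refresh happens lazily from fault-time paths such as
 * permission_fault() below, rather than eagerly on every guest CR0.WP
 * write, which keeps CR0.WP toggles cheap when the bit is passed
 * through.
 */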
/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  u64 access)
{
	/* strip nested paging fault error codes */
	unsigned int pfec = access;
	unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);

	/*
	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
	 * For implicit supervisor accesses, SMAP cannot be overridden.
	 *
	 * SMAP applies to supervisor accesses only; for user accesses,
	 * not_smap may be set or clear with no bearing on the result.
	 *
	 * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
	 * this bit will always be zero in pfec, but it will be one in index
	 * if SMAP checks are being disabled.
	 */
	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
	int index = (pfec | (not_smap ? PFERR_RSVD_MASK : 0)) >> 1;
	u32 errcode = PFERR_PRESENT_MASK;
	bool fault;

	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);

	fault = (mmu->permissions[index] >> pte_access) & 1;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2 attribute
		 * bits per domain in pkru.  pte_pkey is the index of the
		 * protection domain, so pte_pkey * 2 is the index of the
		 * first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) + ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
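
/*
 * Illustrative index computation (assuming the architectural PFEC bit
 * layout: P=0, W/R=1, U/S=2, RSVD=3): a user-mode write has
 * pfec == PFERR_USER_MASK | PFERR_WRITE_MASK == 0x6, giving index
 * 0x6 >> 1 == 3; with SMAP checks disabled (not_smap), the index
 * becomes (0x6 | 0x8) >> 1 == 7.
 */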
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers. Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers. Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}
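
/*
 * Writer-side pairing (sketch): mmu_first_shadow_root_alloc() publishes
 * the flag only after the related pointers are initialized, via
 *
 *	smp_store_release(&kvm->arch.shadow_root_allocated, true);
 *
 * so readers using the acquire above observe fully-initialized state
 * regardless of which locks they hold.
 */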