/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
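/*
 * Worked example (illustrative, not part of the original comment): with
 * VA_BITS == 48, bit 47 of T = __pa_symbol(__hyp_idmap_text_start)
 * selects the half. If that bit is clear (idmap page in the bottom
 * half), the HYP VA range shadows the top half:
 *
 *	HYP_VA_MIN = 1 << 47			= 0x0000800000000000
 *	HYP_VA_MAX = HYP_VA_MIN + (1 << 47) - 1	= 0x0000ffffffffffff
 */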
#ifdef __ASSEMBLY__
#include <asm/alternative.h>
/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
ldr_l \tmp, hyp_physvirt_offset
add \reg, \reg, \tmp
.endm
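/*
 * Illustrative usage (not from the original header): convert the hyp VA
 * held in x0 to a physical address, clobbering x1:
 *
 *	hyp_pa	x0, x1
 */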
/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp
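	/*
	 * Remainder of the macro, reconstructed here as a sketch of how
	 * mainline completes it (the exact instruction sequence is an
	 * assumption): kvm_get_kimage_voffset patches a movz/movk sequence
	 * that materialises kimage_voffset into \tmp, which is then added
	 * to \reg to produce the kimage VA.
	 */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimage VA. */
	add	\reg, \reg, \tmp
.endm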
#else

/*
 * Convert a kernel VA into a HYP VA.
 *
 * Can be called from hyp or non-hyp context.
 *
 * The actual code generation takes place in kvm_update_va_mask(), and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask() uses the
 * specific registers encoded in the instructions).
 */
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	/*
	 * This #ifndef is an optimisation for when this is called from VHE hyp
	 * context. When called from a VHE non-hyp context, kvm_update_va_mask()
	 * will replace the instructions with `nop`s.
	 */
#ifndef __KVM_VHE_HYPERVISOR__
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"		/* mask with va_mask */
				    "ror %0, %0, #1\n"		/* rotate to the first tag bit */
				    "add %0, %0, #0\n"		/* insert the low 12 bits of the tag */
				    "add %0, %0, #0, lsl 12\n"	/* insert the top 12 bits of the tag */
				    "ror %0, %0, #63\n",	/* rotate back */
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
#endif
	return v;
}
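/*
 * Illustrative usage (a sketch, not taken from this header): translate a
 * kernel VA into its hyp alias before handing it to EL2, e.g.
 *
 *	unsigned long hyp_addr = __kern_hyp_va((unsigned long)kaddr);
 *
 * Kernel callers normally go through a kern_hyp_va() wrapper macro rather
 * than calling __kern_hyp_va() directly.
 */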
/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)
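/* Illustrative note: a 40-bit IPA space covers 2^40 bytes, i.e. 1 TiB of guest physical addresses. */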
static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}
static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	/*
	 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
	 * invalidation range exceeds our arbitrary limit on invalidations by
	 * cache line.
	 */
	if (icache_is_aliasing() || size > __invalidate_icache_max_range())
		icache_inval_all_pou();
	else
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}
/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(mmu->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}
static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
	/*
	 * Be careful, mmu may not be fully initialised so do not look at
	 * *any* of its fields.
	 */
	return &kvm->arch.mmu != mmu;
}
/*
 * ARM64 KVM relies on a simple conversion from physaddr to a kernel
 * virtual address (KVA) when it does cache maintenance as the CMO
 * instructions work on virtual addresses. This is incompatible with
 * VM_PFNMAP VMAs which may not have a kernel direct mapping to a
 * virtual address.
 *
 * With S2FWB and CACHE DIC features, KVM need not do cache flushing
 * and CMOs are NOP'd. This has the effect of no longer requiring a
 * KVA for addresses mapped into the S2. The presence of these features
 * is thus necessary to support cacheable S2 mappings of VM_PFNMAP.
 */
static inline bool kvm_supports_cacheable_pfnmap(void)
{
	return cpus_have_final_cap(ARM64_HAS_STAGE2_FWB) &&
	       cpus_have_final_cap(ARM64_HAS_CACHE_DIC);
}