/* * Try to allocate a PMD-aligned region to reduce TLB pressure once * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
*/
hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
PMD_SIZE); if (!hyp_mem_base)
hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE); else
hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);
if (!hyp_mem_base) {
kvm_err("Failed to reserve hyp memory\n"); return;
}
kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
hyp_mem_base);
}
hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT); if (!hyp_vcpu) return -ENOMEM;
ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, vcpu, hyp_vcpu); if (!ret)
vcpu_set_flag(vcpu, VCPU_PKVM_FINALIZED); else
free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
return ret;
}
/* * Allocates and donates memory for hypervisor VM structs at EL2. * * Allocates space for the VM state, which includes the hyp vm as well as * the hyp vcpus. * * Stores an opaque handler in the kvm struct for future reference. * * Return 0 on success, negative error code on failure.
*/ staticint __pkvm_create_hyp_vm(struct kvm *host_kvm)
{
size_t pgd_sz, hyp_vm_sz; void *pgd, *hyp_vm; int ret;
/* * The PGD pages will be reclaimed using a hyp_memcache which implies * page granularity. So, use alloc_pages_exact() to get individual * refcounts.
*/
pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT); if (!pgd) return -ENOMEM;
/* Allocate memory to donate to hyp for vm and vcpu pointers. */
hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
size_mul(sizeof(void *),
host_kvm->created_vcpus)));
hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT); if (!hyp_vm) {
ret = -ENOMEM; goto free_pgd;
}
/* Donate the VM memory to hyp and let hyp initialize it. */
ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd); if (ret < 0) goto free_vm;
/*
 * Lazily instantiate the EL2-side VM state for @host_kvm.
 *
 * A non-zero pkvm handle in the arch state means the hyp VM has
 * already been created, in which case this is a no-op.
 *
 * Return: 0 on success (or if already created), negative error code
 * otherwise.
 */
int pkvm_create_hyp_vm(struct kvm *host_kvm)
{
	int ret = 0;

	mutex_lock(&host_kvm->arch.config_lock);
	if (!host_kvm->arch.pkvm.handle)
		ret = __pkvm_create_hyp_vm(host_kvm);
	mutex_unlock(&host_kvm->arch.config_lock);

	return ret;
}
/*
 * Lazily instantiate the EL2-side state for @vcpu, unless it has
 * already been finalized at hyp (VCPU_PKVM_FINALIZED set).
 *
 * Fix: the original text fell straight into the next function —
 * the trailing "return ret;" and closing brace were missing, leaving
 * a non-void function without a return (undefined behavior).
 *
 * Return: 0 on success (or if already finalized), negative error code
 * on failure.
 */
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	mutex_lock(&vcpu->kvm->arch.config_lock);
	if (!vcpu_get_flag(vcpu, VCPU_PKVM_FINALIZED))
		ret = __pkvm_create_hyp_vcpu(vcpu);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return ret;
}
/*
 * Host-side pKVM VM initialization hook; currently nothing needs to
 * be set up here, so this always succeeds.
 */
int pkvm_init_host_vm(struct kvm *host_kvm)
{
	return 0;
}
/*
 * Per-CPU callback (run via on_each_cpu()) that asks hyp to finalize
 * host protection on the local CPU. On failure, -EINVAL is recorded
 * in the shared error slot pointed to by @arg.
 *
 * Fix: "staticvoid" was a fused token (missing space) — not valid C.
 */
static void __init _kvm_host_prot_finalize(void *arg)
{
	int *err = arg;

	if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
		WRITE_ONCE(*err, -EINVAL);
}
/*
 * Finalize host deprivileging on every CPU. Any per-CPU failure is
 * folded into @ret via _kvm_host_prot_finalize().
 *
 * Fix: "staticint" was a fused token (missing space) — not valid C.
 *
 * Return: 0 on success, -EINVAL if any CPU failed to finalize.
 */
static int __init pkvm_drop_host_privileges(void)
{
	int ret = 0;

	/*
	 * Flip the static key upfront as that may no longer be possible
	 * once the host stage 2 is installed.
	 */
	static_branch_enable(&kvm_protected_mode_initialized);
	on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
	return ret;
}
/*
 * Late-init finalization of protected KVM: hide the HYP sections from
 * kmemleak and then drop host privileges.
 *
 * Fixes: "staticint" was a fused token (missing space), and the
 * function text was truncated — the trailing "return ret;" and closing
 * brace were missing, leaving a non-void function without a return.
 */
static int __init finalize_pkvm(void)
{
	int ret;

	if (!is_protected_kvm_enabled() || !is_kvm_arm_initialised())
		return 0;

	/*
	 * Exclude HYP sections from kmemleak so that they don't get peeked
	 * at, which would end badly once inaccessible.
	 */
	kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
	kmemleak_free_part(__hyp_data_start, __hyp_data_end - __hyp_data_start);
	kmemleak_free_part(__hyp_rodata_start, __hyp_rodata_end - __hyp_rodata_start);
	kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);

	ret = pkvm_drop_host_privileges();
	if (ret)
		pr_err("Failed to finalize Hyp protection: %d\n", ret);

	return ret;
}
if (size != PAGE_SIZE && size != PMD_SIZE) return -EINVAL;
lockdep_assert_held_write(&kvm->mmu_lock);
/* * Calling stage2_map() on top of existing mappings is either happening because of a race * with another vCPU, or because we're changing between page and block mappings. As per * user_mem_abort(), same-size permission faults are handled in the relax_perms() path.
*/
mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1); if (mapping) { if (size == (mapping->nr_pages * PAGE_SIZE)) return -EAGAIN;
/* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. */
ret = __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size); if (ret) return ret;
mapping = NULL;
}
ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot); if (WARN_ON(ret)) return ret;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.