/* GHCB protocol version in use; only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;
/* Functions only used in an SNP VM with the paravisor go here. */

/*
 * NOTE(review): this span looks like a mangled extraction.  The signature
 * below belongs to hv_ghcb_hypercall(), but the statements that follow
 * (ghcb_set_rcx / SVM_EXIT_MSR / assembling *value from RAX+RDX) are the
 * body of a GHCB MSR-read helper: they reference 'msr' and 'value', which
 * are not parameters of this function, 'hv_ghcb' is dereferenced without
 * being initialized, and "unsignedlong" is a fused keyword.  Recover the
 * two original functions from the upstream file before building.
 */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{ union hv_ghcb *hv_ghcb; void **ghcb_base; unsignedlong flags;
u64 status;
/* MSR read via the GHCB protocol: low 32 bits arrive in RAX, high in RDX. */
ghcb_set_rcx(&hv_ghcb->ghcb, msr); if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
pr_warn("Fail to read msr via ghcb %llx.\n", msr); else
*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
local_irq_restore(flags);
}
/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */

/* Page-sized, page-aligned AP start argument block; placed in the
 * decrypted BSS section (__bss_decrypted). */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
/* Page-aligned stack used during AP start-up. */
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
/* Per-CPU pointer to an SEV-ES save area (VMSA). */
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
/* Functions only used in an SNP VM without the paravisor go here. */

/*
 * NOTE(review): the lines below are disjoint fragments of at least two
 * functions whose signatures and surrounding statements are missing from
 * this extraction: an RMPADJUST attribute setup and an AP-boot path that
 * validates the Hyper-V VP index and fills in SNP-specific VMSA fields.
 * 'attrs', 'vmsa', 'vp_index', 'apic_id' and 'ret' are all used without
 * visible declarations — recover the complete functions from the upstream
 * file before building.
 */

/*
 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
 * using the RMPADJUST instruction. However, for the instruction to
 * succeed it must target the permissions of a lesser privileged
 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
 * instruction in the AMD64 APM Volume 3).
 */
attrs = 1; if (vmsa)
attrs |= RMPADJUST_VMSA_PAGE_BIT;

/* Find the Hyper-V VP index which might be not the same as APIC ID */
vp_index = hv_apicid_to_vp_index(apic_id); if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index) return -EINVAL;

/*
 * Set the SNP-specific fields for this VMSA:
 *   VMPL level
 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
 */
vmsa->vmpl = 0;
vmsa->sev_features = sev_status >> 2;
/* On RMPADJUST failure: log, free the VMSA page, and propagate the error. */
ret = snp_set_vmsa(vmsa, true); if (ret) {
pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
free_page((u64)vmsa); return ret;
}
/*
 * NOTE(review): fragment — the enclosing function's opening (presumably
 * hv_ivm_msr_write(), mirroring hv_ivm_msr_read() below) is missing from
 * this extraction, and "elseif" is a fused keyword.  Dispatches the MSR
 * write to the TDX- or SNP-specific mechanism.
 */
if (hv_isolation_type_tdx())
hv_tdx_msr_write(msr, value); elseif (hv_isolation_type_snp())
hv_ghcb_msr_write(msr, value);
}
/*
 * hv_ivm_msr_read - Read a synthetic MSR through the paravisor.
 * @msr:   MSR index to read.
 * @value: Output; written with the MSR contents on success.
 *
 * No-op when no paravisor is present.  Dispatches to the TDX- or
 * SNP-specific read mechanism depending on the isolation type.
 *
 * Fixes extraction-mangled tokens in the original ("elseif" and collapsed
 * statements), which made the block uncompilable.
 */
void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}
/*
 * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
 *
 * In Isolation VM, all guest memory is encrypted from host and guest
 * needs to set memory visible to host via hvcall before sharing memory
 * with host.
 *
 * NOTE(review): truncated extraction — the hypercall setup and invocation
 * that should fill 'input' and 'hv_status' are missing; as written,
 * 'hv_status' is read uninitialized and 'input'/'flags' are unused.
 * "staticint"/"unsignedlong" are fused keywords.  Recover the full body
 * from the upstream file before building.
 */ staticint hv_mark_gpa_visibility(u16 count, const u64 pfn[], enum hv_mem_host_visibility visibility)
{ struct hv_gpa_range_for_visibility *input;
u64 hv_status; unsignedlong flags;
/* no-op if partition isolation is not enabled */ if (!hv_is_isolation_supported()) return 0;
/* Map hypercall status to 0 on success, -EFAULT on any failure. */
if (hv_result_success(hv_status)) return 0; else return -EFAULT;
}
/*
 * When transitioning memory between encrypted and decrypted, the caller
 * of set_memory_encrypted() or set_memory_decrypted() is responsible for
 * ensuring that the memory isn't in use and isn't referenced while the
 * transition is in progress.  The transition has multiple steps, and the
 * memory is in an inconsistent state until all steps are complete.  A
 * reference while the state is inconsistent could result in an exception
 * that can't be cleanly fixed up.
 *
 * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
 * stray reference that can't be prevented by the caller, so Linux has
 * specific code to handle this case.  But when the #VC and #VE exceptions
 * are routed to a paravisor, the specific code doesn't work.  To avoid
 * this problem, mark the pages as "not present" while the transition is
 * in progress.  If load_unaligned_zeropad() causes a stray reference, a
 * normal page fault is generated instead of #VC or #VE, and the
 * page-fault-based handlers for load_unaligned_zeropad() resolve the
 * reference.  When the transition is complete,
 * hv_vtom_set_host_visibility() marks the pages as "present" again.
 */
static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{
	/* Clear the PRESENT bit on the whole range; 'enc' is unused here. */
	return set_memory_np(kbuffer, pagecount);
}
/*
 * hv_vtom_set_host_visibility - Set specified memory visible to host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must make memory visible to the host via hypercall before sharing
 * it.  This wraps hv_mark_gpa_visibility() for a contiguous kernel buffer,
 * batching up to HV_MAX_MODIFY_GPA_REP_COUNT PFNs per hypercall.
 */
static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	phys_addr_t paddr;
	int i, pfn, err;
	void *vaddr;
	int ret = 0;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array) {
		ret = -ENOMEM;
		goto err_set_memory_p;
	}

	for (i = 0, pfn = 0; i < pagecount; i++) {
		/*
		 * Use slow_virt_to_phys() because the PRESENT bit has been
		 * temporarily cleared in the PTEs.  slow_virt_to_phys() works
		 * without the PRESENT bit while virt_to_hvpfn() or similar
		 * does not.
		 */
		vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
		paddr = slow_virt_to_phys(vaddr);
		pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
		pfn++;

		/* Flush a full batch, or the final partial batch. */
		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array, visibility);
			if (ret)
				goto err_free_pfn_array;
			pfn = 0;
		}
	}

err_free_pfn_array:
	kfree(pfn_array);

err_set_memory_p:
	/*
	 * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
	 * did.  Do this even if there is an error earlier in this function in
	 * order to avoid leaving the memory range in a "broken" state.  Setting
	 * the PRESENT bits shouldn't fail, but return an error if it does.
	 */
	err = set_memory_p(kbuffer, pagecount);
	if (err && !ret)
		ret = err;

	return ret;
}
/*
 * hv_vtom_clear_present() already marks the PTEs "not present" and
 * flushes the TLB, so the pages cannot have live TLB entries when this
 * hook runs.  The flush controlled here would be redundant; always
 * report that no flush is required.
 */
static bool hv_vtom_tlb_flush_required(bool private)
{
	return false;
}
/*
 * hv_is_private_mmio - Report whether an MMIO address must be mapped
 * private (guest-encrypted) rather than shared with the host.
 */
static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}
/*
 * hv_vtom_init - Select the confidential-computing vendor (and, for SNP,
 * sev_status) for a vTOM-based isolation VM, keyed on the Hyper-V
 * isolation type.
 *
 * NOTE(review): the body appears truncated by this extraction — the
 * function's closing brace (and any setup following the switch in the
 * upstream file) is not visible here.
 */
void __init hv_vtom_init(void)
{ enum hv_isolation_type type = hv_get_isolation_type();
switch (type) { case HV_ISOLATION_TYPE_VBS:
fallthrough;
/*
 * By design, a VM using vTOM doesn't see the SEV setting,
 * so SEV initialization is bypassed and sev_status isn't set.
 * Set it here to indicate a vTOM VM.
 *
 * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
 * defined as 0ULL, to which we can't assign a value.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
case HV_ISOLATION_TYPE_SNP:
sev_status = MSR_AMD64_SNP_VTOM;
cc_vendor = CC_VENDOR_AMD; break;
#endif
case HV_ISOLATION_TYPE_TDX:
cc_vendor = CC_VENDOR_INTEL; break;
default:
panic("hv_vtom_init: unsupported isolation type %d\n", type);
}
/*
 * hv_is_isolation_supported - Check whether the system runs in a Hyper-V
 * isolation VM.
 *
 * NOTE(review): truncated extraction — only the early-out guards are
 * visible; the final check/return and the closing brace are missing, and
 * "returnfalse" is a fused keyword.  Recover the full body from the
 * upstream file before building.
 */ bool hv_is_isolation_supported(void)
{ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) returnfalse;
if (!hypervisor_is_type(X86_HYPER_MS_HYPERV)) returnfalse;
/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
	return static_branch_unlikely(&isolation_type_snp);
}
DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);

/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolated VM.
 */
bool hv_isolation_type_tdx(void)
{
	return static_branch_unlikely(&isolation_type_tdx);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.15 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.