// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

/*
 * misc.h needs to be first because it knows how to include the other kernel
 * headers in the pre-decompression code in a way that does not break
 * compilation.
 */
#include "misc.h"
/* * If private -> shared then invalidate the page before requesting the * state change in the RMP table.
*/ if (op == SNP_PAGE_STATE_SHARED)
pvalidate_4k_page(paddr, paddr, false);
/* Save the current GHCB MSR value */
msr = sev_es_rd_ghcb_msr();
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();
/* Read the response of the VMGEXIT. */
val = sev_es_rd_ghcb_msr(); if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
/* Restore the GHCB MSR value */
sev_es_wr_ghcb_msr(msr);
/* * Now that page state is changed in the RMP table, validate it so that it is * consistent with the RMP entry.
*/ if (op == SNP_PAGE_STATE_PRIVATE)
pvalidate_4k_page(paddr, paddr, true);
}
void snp_set_page_private(unsignedlong paddr)
{ if (!sev_snp_enabled()) return;
bool early_setup_ghcb(void)
{ if (set_page_decrypted((unsignedlong)&boot_ghcb_page)) returnfalse;
/* Page is now mapped decrypted, clear it */
memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));
boot_ghcb = &boot_ghcb_page;
/* Initialize lookup tables for the instruction decoder */
sev_insn_decode_init();
/* SNP guest requires the GHCB GPA must be registered */ if (sev_snp_enabled())
snp_register_ghcb_early(__pa(&boot_ghcb_page));
returntrue;
}
/*
 * Accept the physical memory range [start, end) on behalf of the guest:
 * request a page state change to private for every 4K page in the range.
 * __page_state_change() also pvalidates the page after the state change
 * (see its comments above).
 */
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
	phys_addr_t pa;

	for (pa = start; pa < end; pa += PAGE_SIZE)
		__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
}
/*
 * Tear down the boot GHCB before handing over to the decompressed
 * kernel: map the GHCB page encrypted again and then non-present so any
 * late #VC exception that tries to use it faults loudly.
 */
void sev_es_shutdown_ghcb(void)
{
	if (!boot_ghcb)
		return;

	/* The required SEV-ES CPU features must (still) be present. */
	if (!sev_es_check_cpu_features())
		error("SEV-ES CPU Features missing.");

	/*
	 * This denotes whether to use the GHCB MSR protocol or the GHCB
	 * shared page to perform a GHCB request. Since the GHCB page is
	 * being changed to encrypted, it can't be used to perform GHCB
	 * requests. Clear the boot_ghcb variable so that the GHCB MSR
	 * protocol is used to change the GHCB page over to an encrypted
	 * page.
	 */
	boot_ghcb = NULL;

	/*
	 * GHCB Page must be flushed from the cache and mapped encrypted again.
	 * Otherwise the running kernel will see strange cache effects when
	 * trying to use that page.
	 */
	if (set_page_encrypted((unsigned long)&boot_ghcb_page))
		error("Can't map GHCB page encrypted");

	/*
	 * GHCB page is mapped encrypted again and flushed from the cache.
	 * Mark it non-present now to catch bugs when #VC exceptions trigger
	 * after this point.
	 */
	if (set_page_non_present((unsigned long)&boot_ghcb_page))
		error("Can't unmap GHCB page");
}
while (true) asmvolatile("hlt\n" : : : "memory");
}
bool sev_es_check_ghcb_fault(unsignedlong address)
{ /* Check whether the fault was on the GHCB page */ return ((address & PAGE_MASK) == (unsignedlong)&boot_ghcb_page);
}
/*
 * SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
 * guest side implementation for proper functioning of the guest. If any
 * of these features are enabled in the hypervisor but are lacking guest
 * side implementation, the behavior of the guest will be undefined. The
 * guest could fail in non-obvious way making it difficult to debug.
 *
 * As the behavior of reserved feature bits is unknown to be on the
 * safe side add them to the required features mask.
 */
#define SNP_FEATURES_IMPL_REQ	(MSR_AMD64_SNP_VTOM |			\
				 MSR_AMD64_SNP_REFLECT_VC |		\
				 MSR_AMD64_SNP_RESTRICTED_INJ |		\
				 MSR_AMD64_SNP_ALT_INJ |		\
				 MSR_AMD64_SNP_DEBUG_SWAP |		\
				 MSR_AMD64_SNP_VMPL_SSS |		\
				 MSR_AMD64_SNP_SECURE_TSC |		\
				 MSR_AMD64_SNP_VMGEXIT_PARAM |		\
				 MSR_AMD64_SNP_VMSA_REG_PROT |		\
				 MSR_AMD64_SNP_RESERVED_BIT13 |		\
				 MSR_AMD64_SNP_RESERVED_BIT15 |		\
				 MSR_AMD64_SNP_RESERVED_MASK)

/*
 * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
 * by the guest kernel. As and when a new feature is implemented in the
 * guest kernel, a corresponding bit should be added to the mask.
 */
#define SNP_FEATURES_PRESENT	(MSR_AMD64_SNP_DEBUG_SWAP |		\
				 MSR_AMD64_SNP_SECURE_TSC)
/*
 * Compute the mask of SNP features that the hypervisor has enabled but
 * the guest does not implement.
 *
 * @status: value read from the MSR_AMD64_SEV status MSR.
 *
 * Returns 0 when SNP is not enabled in @status; otherwise the required
 * feature bits minus those the guest implements. A non-zero result
 * means guest behavior would be undefined and boot must not proceed.
 */
u64 snp_get_unsupported_features(u64 status)
{
	if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
		return 0;

	return status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
}
void snp_check_features(void)
{
u64 unsupported;
/* * Terminate the boot if hypervisor has enabled any feature lacking * guest side implementation. Pass on the unsupported features mask through * EXIT_INFO_2 of the GHCB protocol so that those features can be reported * as part of the guest boot failure.
*/
unsupported = snp_get_unsupported_features(sev_status); if (unsupported) { if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
/* * Initial set up of SNP relies on information provided by the * Confidential Computing blob, which can be passed to the boot kernel * by firmware/bootloader in the following ways: * * - via an entry in the EFI config table * - via a setup_data structure, as defined by the Linux Boot Protocol * * Scan for the blob in that order.
*/ staticstruct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{ struct cc_blob_sev_info *cc_info;
cc_info = find_cc_blob_efi(bp); if (cc_info) goto found_cc_info;
cc_info = find_cc_blob_setup_data(bp); if (!cc_info) return NULL;
found_cc_info: if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
return cc_info;
}
/* * Indicate SNP based on presence of SNP-specific CC blob. Subsequent checks * will verify the SNP CPUID/MSR bits.
*/ staticbool early_snp_init(struct boot_params *bp)
{ struct cc_blob_sev_info *cc_info;
if (!bp) returnfalse;
cc_info = find_cc_blob(bp); if (!cc_info) returnfalse;
/* * If a SNP-specific Confidential Computing blob is present, then * firmware/bootloader have indicated SNP support. Verifying this * involves CPUID checks which will be more reliable if the SNP * CPUID table is used. See comments over snp_setup_cpuid_table() for * more details.
*/
setup_cpuid_table(cc_info);
/* * Record the SVSM Calling Area (CA) address if the guest is not * running at VMPL0. The CA will be used to communicate with the * SVSM and request its services.
*/
svsm_setup_ca(cc_info);
/* * Pass run-time kernel a pointer to CC info via boot_params so EFI * config table doesn't need to be searched again during early startup * phase.
*/
bp->cc_blob_address = (u32)(unsignedlong)cc_info;
returntrue;
}
/*
 * sev_check_cpu_support - Check for SEV support in the CPU capabilities
 *
 * Returns < 0 if SEV is not supported, otherwise the position of the
 * encryption bit in the page table descriptors.
 */
static int sev_check_cpu_support(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return -ENODEV;

	/*
	 * Check for the SME/SEV feature:
	 * CPUID Fn8000_001F[EAX]
	 * - Bit 0 - Secure Memory Encryption support
	 * - Bit 1 - Secure Encrypted Virtualization support
	 * CPUID Fn8000_001F[EBX]
	 * - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV is supported */
	if (!(eax & BIT(1)))
		return -ENODEV;

	return ebx & 0x3f;
}
void sev_enable(struct boot_params *bp)
{ struct msr m; int bitpos; bool snp;
/* * bp->cc_blob_address should only be set by boot/compressed kernel. * Initialize it to 0 to ensure that uninitialized values from * buggy bootloaders aren't propagated.
*/ if (bp)
bp->cc_blob_address = 0;
/* * Do an initial SEV capability check before early_snp_init() which * loads the CPUID page and the same checks afterwards are done * without the hypervisor and are trustworthy. * * If the HV fakes SEV support, the guest will crash'n'burn * which is good enough.
*/
if (sev_check_cpu_support() < 0) return;
/* * Setup/preliminary detection of SNP. This will be sanity-checked * against CPUID/MSR values later.
*/
snp = early_snp_init(bp);
/* Now repeat the checks with the SNP CPUID table. */
bitpos = sev_check_cpu_support(); if (bitpos < 0) { if (snp)
error("SEV-SNP support indicated by CC blob, but not CPUID."); return;
}
/* Set the SME mask if this is an SEV guest. */
boot_rdmsr(MSR_AMD64_SEV, &m);
sev_status = m.q; if (!(sev_status & MSR_AMD64_SEV_ENABLED)) return;
/* Negotiate the GHCB protocol version. */ if (sev_status & MSR_AMD64_SEV_ES_ENABLED) { if (!sev_es_negotiate_protocol())
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
}
/* * SNP is supported in v2 of the GHCB spec which mandates support for HV * features.
*/ if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
u64 hv_features; int ret;
hv_features = get_hv_features(); if (!(hv_features & GHCB_HV_FT_SNP))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
/* * Enforce running at VMPL0 or with an SVSM. * * Use RMPADJUST (see the rmpadjust() function for a description of * what the instruction does) to update the VMPL1 permissions of a * page. If the guest is running at VMPL0, this will succeed. If the * guest is running at any other VMPL, this will fail. Linux SNP guests * only ever run at a single VMPL level so permission mask changes of a * lesser-privileged VMPL are a don't-care.
*/
ret = rmpadjust((unsignedlong)&boot_ghcb_page, RMP_PG_SIZE_4K, 1);
/* * Running at VMPL0 is not required if an SVSM is present and the hypervisor * supports the required SVSM GHCB events.
*/ if (ret &&
!(snp_vmpl && (hv_features & GHCB_HV_FT_SNP_MULTI_VMPL)))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}
if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
error("SEV-SNP supported indicated by CC blob, but not SEV status MSR.");
sme_me_mask = BIT_ULL(bitpos);
}
/*
 * sev_get_status - Retrieve the SEV status mask
 *
 * Returns 0 if the CPU is not SEV capable, otherwise the value of the
 * AMD64_SEV MSR.
 */
u64 sev_get_status(void)
{
	struct msr m;

	if (sev_check_cpu_support() < 0)
		return 0;

	boot_rdmsr(MSR_AMD64_SEV, &m);
	return m.q;
}
void sev_prep_identity_maps(unsignedlong top_level_pgt)
{ /* * The Confidential Computing blob is used very early in uncompressed * kernel to find the in-memory CPUID table to handle CPUID * instructions. Make sure an identity-mapping exists so it can be * accessed after switchover.
*/ if (sev_snp_enabled()) { unsignedlong cc_info_pa = boot_params_ptr->cc_blob_address; struct cc_blob_sev_info *cc_info;
if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) returnfalse;
sevsnp = true;
if (!snp_vmpl) { unsignedint eax, ebx, ecx, edx;
/* * CPUID Fn8000_001F_EAX[28] - SVSM support
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx); if (eax & BIT(28)) { struct msr m;
/* Obtain the address of the calling area to use */
boot_rdmsr(MSR_SVSM_CAA, &m);
boot_svsm_caa = (void *)m.q;
boot_svsm_caa_pa = m.q;
/* * The real VMPL level cannot be discovered, but the * memory acceptance routines make no use of that so * any non-zero value suffices here.
*/
snp_vmpl = U8_MAX;
}
} returntrue;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.1 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.