/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
/* SME encryption mask; zero when SME is inactive. */
u64 sme_me_mask __section(".data") = 0;
SYM_PIC_ALIAS(sme_me_mask);
/* SEV status MSR value cached at boot; tested via MSR_AMD64_SEV_*_ENABLED bits below. */
u64 sev_status __section(".data") = 0;
SYM_PIC_ALIAS(sev_status);
/* NOTE(review): scratch word, presumably used by early SEV sanity checks — user not visible in this chunk. */
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
/* Buffer used for early in-place encryption by BSP, no locking needed */ staticchar sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
/*
 * SNP-specific routine which needs to additionally change the page state from
 * private to shared before copying the data from the source to destination and
 * restore after the copy.
 */
static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
				     unsigned long paddr, bool decrypt)
{
	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (decrypt) {
		/*
		 * @paddr needs to be accessed decrypted, mark the page shared in
		 * the RMP table before copying it.
		 */
		early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);

		memcpy(dst, src, sz);

		/* Restore the page state after the memcpy. */
		early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
	} else {
		/*
		 * @paddr needs to be accessed encrypted, no need for the page
		 * state change.
		 */
		memcpy(dst, src, sz);
	}
}
/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	/* Nothing to do if SME is not active. */
	if (!sme_me_mask)
		return;

	/* Flush caches before operating on the memory in-place (per the APM). */
	wbinvd();

	/*
	 * There are limited number of early mapping slots, so map (at most)
	 * one page at time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
			snp_memcpy(sme_early_buffer, src, len, paddr, enc);
			snp_memcpy(dst, sme_early_buffer, len, paddr, !enc);
		} else {
			memcpy(sme_early_buffer, src, len);
			memcpy(dst, sme_early_buffer, len);
		}

		/* Release the early mapping slots before the next chunk. */
		early_memunmap(dst, len);
		early_memunmap(src, len);

		/* Advance to the next chunk; without this the loop never terminates. */
		paddr += len;
		size -= len;
	}
}
/*
 * Callback run before the encryption attribute of a mapped range changes.
 * Always returns 0; there is no failure path here.
 */
static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
	 * to invalidate the memory before encryption attribute is cleared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);

	return 0;
}
/* Always report success: the return value doesn't matter for the SEV side */
static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * After memory is mapped encrypted in the page table, validate it
	 * so that it is consistent with the page table updates.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
		snp_set_memory_private(vaddr, npages);

	/* Plain SEV guests notify the hypervisor of the C-bit change instead. */
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);

	return 0;
}
int prepare_pte_enc(struct pte_enc_desc *d)
{
pgprot_t old_prot;
d->pfn = pg_level_to_pfn(d->pte_level, d->kpte, &old_prot); if (!d->pfn) return 1;
/* * In-place en-/decryption and physical page attribute change * from C=1 to C=0 or vice versa will be performed. Flush the * caches to ensure that data gets accessed with the correct * C-bit.
*/ if (d->va)
clflush_cache_range(d->va, d->size); else
clflush_cache_range(__va(d->pa), d->size);
/* Encrypt/decrypt the contents in-place */ if (enc) {
sme_early_encrypt(d.pa, d.size);
} else {
sme_early_decrypt(d.pa, d.size);
/* * ON SNP, the page state in the RMP table must happen * before the page table updates.
*/
early_snp_set_memory_shared((unsignedlong)__va(d.pa), d.pa, 1);
}
set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
/* * If page is set encrypted in the page table, then update the RMP table to * add this page as private.
*/ if (enc)
early_snp_set_memory_private((unsignedlong)__va(d.pa), d.pa, 1);
}
/*
 * NOTE(review): the statements below are the interior of a per-page loop
 * whose enclosing function header is not visible in this chunk (presumably
 * early_set_memory_enc_dec() or similar). vaddr/vaddr_end/vaddr_next/kpte/
 * level/pmask/psize/enc look like loop-local state of that function —
 * confirm against the full file before editing.
 */
/*
 * Check whether we can change the large page in one go.
 * We request a split when the address is not aligned and
 * the number of pages to set/clear encryption bit is smaller
 * than the number of pages in the large page.
 */
if (vaddr == (vaddr & pmask) &&
    ((vaddr_end - vaddr) >= psize)) {
	__set_clr_pte_enc(kpte, level, enc);
	/* Skip over the whole large page we just converted in place. */
	vaddr_next = (vaddr & pmask) + psize; continue;
}

/*
 * The virtual address is part of a larger page, create the next
 * level page table mapping (4K or 2M). If it is part of a 2M
 * page then we request a split of the large page into 4K
 * chunks. A 1GB large page is split into 2M pages, resp.
 */
if (level == PG_LEVEL_2M)
	split_page_size_mask = 0; else
	split_page_size_mask = 1 << PG_LEVEL_2M;

/*
 * kernel_physical_mapping_change() does not flush the TLBs, so
 * a TLB flush is required after we exit from the for loop.
 */
kernel_physical_mapping_change(__pa(vaddr & pmask),
			       __pa((vaddr_end & pmask) + psize),
			       split_page_size_mask);
}
/*
 * NOTE(review): fragment — the statements below read sev_status and tweak
 * x86_init/x86_cpuinit hooks; they appear to be the tail of an early SEV
 * init routine (likely sme_early_init()), whose header is not visible in
 * this chunk. Confirm against the full file.
 */
/*
 * AMD-SEV-ES intercepts the RDMSR to read the X2APIC ID in the
 * parallel bringup low level code. That raises #VC which cannot be
 * handled there.
 * It does not provide a RDMSR GHCB protocol so the early startup
 * code cannot directly communicate with the secure firmware. The
 * alternative solution to retrieve the APIC ID via CPUID(0xb),
 * which is covered by the GHCB protocol, is not viable either
 * because there is no enforcement of the CPUID(0xb) provided
 * "initial" APIC ID to be the same as the real APIC ID.
 * Disable parallel bootup.
 */
if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
	x86_cpuinit.parallel_bringup = false;

/*
 * The VMM is capable of injecting interrupt 0x80 and triggering the
 * compatibility syscall path.
 *
 * By default, the 32-bit emulation is disabled in order to ensure
 * the safety of the VM.
 */
if (sev_status & MSR_AMD64_SEV_ENABLED)
	ia32_disable();

/*
 * Override init functions that scan the ROM region in SEV-SNP guests,
 * as this memory is not pre-validated and would thus cause a crash.
 */
if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
	x86_init.mpparse.find_mptable = x86_init_noop;
	x86_init.pci.init_irq = x86_init_noop;
	x86_init.resources.probe_roms = x86_init_noop;

	/*
	 * DMI setup behavior for SEV-SNP guests depends on
	 * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
	 * parsed yet. snp_dmi_setup() will run after that
	 * parsing has happened.
	 */
	x86_init.resources.dmi_setup = snp_dmi_setup;
}

/*
 * Switch the SVSM CA mapping (if active) from identity mapped to
 * kernel mapped.
 */
snp_update_svsm_ca();

/* Secure TSC implies a reliable, non-watchdog-checked TSC. */
if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
}
/*
 * Free the unused portion of the __bss_decrypted section, re-encrypting it
 * first when it was decrypted at boot (SME active).
 */
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	/*
	 * Compute the bounds of the unused decrypted range before using them;
	 * leaving these uninitialized (as in the previous revision) is
	 * undefined behavior.
	 */
	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * If the unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it. Base the
	 * re-encryption on the same condition used for the decryption in
	 * sme_postprocess_startup(). Higher level abstractions, such as
	 * CC_ATTR_MEM_ENCRYPT, aren't necessarily equivalent in a Hyper-V VM
	 * using vTOM, where sme_me_mask is always zero.
	 */
	if (sme_me_mask) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	/* Hand the now-consistent range back to the kernel. */
	free_init_pages("unused decrypted", vaddr, vaddr_end);
}
/*
 * NOTE(review): the trailing text below is extraction residue (a German
 * website disclaimer), not part of this source file. Preserved in
 * translation, fenced as a comment so the file stays parseable:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */