/*
 * Acquire this CPU's GHCB for use in a #VC handling path.
 *
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 *
 * NOTE(review): this chunk is truncated — the tail of the function (saving
 * the active GHCB into the backup and returning the GHCB pointer) is not
 * visible here.
 */
noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	/* Enforce the caller contract stated above. */
	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active = false;
			data->backup_ghcb_active = false;

			/*
			 * panic() is not noinstr-safe, so instrumentation is
			 * re-enabled around it; the machine is dead past this
			 * point anyway.
			 */
			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;
	/*
	 * NOTE(review): loop body of a page-state-change routine whose function
	 * header is not visible in this chunk; vaddr/paddr/paddr_end/op/val are
	 * presumably its locals/parameters, and the loop's closing brace and
	 * the e_term label lie outside this chunk — confirm against the full
	 * file.
	 */
	while (paddr < paddr_end) {
		/* Page validation must be rescinded before changing to shared */
		if (op == SNP_PAGE_STATE_SHARED)
			pvalidate_4k_page(vaddr, paddr, false);

		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		/* Terminate if the hypervisor rejected or garbled the response. */
		if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
			goto e_term;

		if (GHCB_MSR_PSC_RESP_VAL(val))
			goto e_term;

		/* Page validation must be performed after changing to private */
		if (op == SNP_PAGE_STATE_PRIVATE)
			pvalidate_4k_page(vaddr, paddr, true);
/*
 * Transition a range of pages to guest-private in the RMP table.
 *
 * @vaddr:  virtual address of the first page
 * @paddr:  physical address of the first page
 * @npages: number of 4K pages to transition
 *
 * No-op unless the guest is running with SEV-SNP enabled.
 *
 * Fix: "unsignedlong" was a garbled "unsigned long" in all three
 * parameter declarations and did not compile.
 */
void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
}
/*
 * Transition a range of pages to hypervisor-shared in the RMP table.
 *
 * @vaddr:  virtual address of the first page
 * @paddr:  physical address of the first page
 * @npages: number of 4K pages to transition
 *
 * No-op unless the guest is running with SEV-SNP enabled.
 *
 * Fix: "unsignedlong" was a garbled "unsigned long" in all three
 * parameter declarations and did not compile.
 */
void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned long npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}
/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 *
 * NOTE(review): this definition is truncated in this chunk — the final
 * "return cc_info;" and closing brace are not visible.
 */
static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* Boot kernel would have passed the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		/* NOTE(review): "unsignedlong" is a garbled "unsigned long" cast. */
		cc_info = (struct cc_blob_sev_info *)(unsignedlong)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If kernel was booted directly, without the use of the
	 * boot/decompression kernel, the CC blob may have been passed via
	 * setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	/* A blob with the wrong magic cannot be trusted; terminate the guest. */
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();
	/*
	 * NOTE(review): the lines below belong to a different function (the
	 * SVSM Calling Area setup path) whose header is not visible in this
	 * chunk; pa, call and ret are presumably its locals — confirm against
	 * the full file.
	 *
	 * Record the SVSM Calling Area address (CAA) if the guest is not
	 * running at VMPL0. The CA will be used to communicate with the
	 * SVSM to perform the SVSM services.
	 */
	if (!svsm_setup_ca(cc_info))
		return;

	/*
	 * It is very early in the boot and the kernel is running identity
	 * mapped but without having adjusted the pagetables to where the
	 * kernel was loaded (physbase), so get the CA address using
	 * RIP-relative addressing.
	 */
	pa = (u64)rip_rel_ptr(&boot_svsm_ca_page);

	/*
	 * Switch over to the boot SVSM CA while the current CA is still
	 * addressable. There is no GHCB at this point so use the MSR protocol.
	 *
	 * SVSM_CORE_REMAP_CA call:
	 *   RAX = 0 (Protocol=0, CallID=0)
	 *   RCX = New CA GPA
	 */
	call.caa = svsm_get_caa();
	call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA);
	call.rcx = pa;
	ret = svsm_perform_call_protocol(&call);
	if (ret)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL);

	/*
	 * The CC blob will be used later to access the secrets page. Cache
	 * it here like the boot kernel does.
	 *
	 * NOTE(review): "unsignedlong" is a garbled "unsigned long" cast.
	 */
	bp->cc_blob_address = (u32)(unsignedlong)cc_info;
/*
 * NOTE(review): trailing boilerplate from the web page this chunk was
 * extracted from — not kernel code. Translated from German:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, neither completeness nor correctness nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */