/* Minimum firmware version required for the SEV-SNP support */ #define SNP_MIN_API_MAJOR 1 #define SNP_MIN_API_MINOR 51
/* * Maximum number of firmware-writable buffers that might be specified * in the parameters of a legacy SEV command buffer.
*/ #define CMD_BUF_FW_WRITABLE_MAX 2
/* Leave room in the descriptor array for an end-of-list indicator. */ #define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1)
staticint psp_cmd_timeout = 100;
module_param(psp_cmd_timeout, int, 0644);
MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
staticint psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
staticchar *init_ex_path;
module_param(init_ex_path, charp, 0444);
MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX");
staticbool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
staticbool psp_dead; staticint psp_timeout;
/* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator * to allocate the memory, which will return aligned memory for the specified * allocation order. * * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized.
*/ #define SEV_TMR_SIZE (1024 * 1024) #define SNP_TMR_SIZE (2 * 1024 * 1024)
/* INIT_EX NV Storage: * The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page * allocator to allocate the memory, which will return aligned memory for the * specified allocation order.
*/ #define NV_LENGTH (32 * 1024) staticvoid *sev_init_ex_buffer;
/* * SEV_DATA_RANGE_LIST: * Array containing range of pages that firmware transitions to HV-fixed * page state.
*/ staticstruct sev_data_range_list *snp_range_list;
/* Check if it is command completion: */ if (!(status & SEV_CMD_COMPLETE)) return;
/* Check if it is SEV command completion: */
reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
sev->int_rcvd = 1;
wake_up(&sev->int_queue);
}
}
/* * If invoked during panic handling, local interrupts are disabled, * so the PSP command completion interrupt can't be used. Poll for * PSP command completion instead.
*/ if (irqs_disabled()) { unsignedlong timeout_usecs = (timeout * USEC_PER_SEC) / 10;
/* Poll for SEV command completion: */ while (timeout_usecs--) {
*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); if (*reg & PSP_CMDRESP_RESP) return 0;
udelay(10);
} return -ETIMEDOUT;
}
ret = wait_event_timeout(sev->int_queue,
sev->int_rcvd, timeout * HZ); if (!ret) return -ETIMEDOUT;
staticint sev_cmd_buffer_len(int cmd)
{ switch (cmd) { case SEV_CMD_INIT: returnsizeof(struct sev_data_init); case SEV_CMD_INIT_EX: returnsizeof(struct sev_data_init_ex); case SEV_CMD_SNP_SHUTDOWN_EX: returnsizeof(struct sev_data_snp_shutdown_ex); case SEV_CMD_SNP_INIT_EX: returnsizeof(struct sev_data_snp_init_ex); case SEV_CMD_PLATFORM_STATUS: returnsizeof(struct sev_user_data_status); case SEV_CMD_PEK_CSR: returnsizeof(struct sev_data_pek_csr); case SEV_CMD_PEK_CERT_IMPORT: returnsizeof(struct sev_data_pek_cert_import); case SEV_CMD_PDH_CERT_EXPORT: returnsizeof(struct sev_data_pdh_cert_export); case SEV_CMD_LAUNCH_START: returnsizeof(struct sev_data_launch_start); case SEV_CMD_LAUNCH_UPDATE_DATA: returnsizeof(struct sev_data_launch_update_data); case SEV_CMD_LAUNCH_UPDATE_VMSA: returnsizeof(struct sev_data_launch_update_vmsa); case SEV_CMD_LAUNCH_FINISH: returnsizeof(struct sev_data_launch_finish); case SEV_CMD_LAUNCH_MEASURE: returnsizeof(struct sev_data_launch_measure); case SEV_CMD_ACTIVATE: returnsizeof(struct sev_data_activate); case SEV_CMD_DEACTIVATE: returnsizeof(struct sev_data_deactivate); case SEV_CMD_DECOMMISSION: returnsizeof(struct sev_data_decommission); case SEV_CMD_GUEST_STATUS: returnsizeof(struct sev_data_guest_status); case SEV_CMD_DBG_DECRYPT: returnsizeof(struct sev_data_dbg); case SEV_CMD_DBG_ENCRYPT: returnsizeof(struct sev_data_dbg); case SEV_CMD_SEND_START: returnsizeof(struct sev_data_send_start); case SEV_CMD_SEND_UPDATE_DATA: returnsizeof(struct sev_data_send_update_data); case SEV_CMD_SEND_UPDATE_VMSA: returnsizeof(struct sev_data_send_update_vmsa); case SEV_CMD_SEND_FINISH: returnsizeof(struct sev_data_send_finish); case SEV_CMD_RECEIVE_START: returnsizeof(struct sev_data_receive_start); case SEV_CMD_RECEIVE_FINISH: returnsizeof(struct sev_data_receive_finish); case SEV_CMD_RECEIVE_UPDATE_DATA: returnsizeof(struct sev_data_receive_update_data); case SEV_CMD_RECEIVE_UPDATE_VMSA: returnsizeof(struct sev_data_receive_update_vmsa); case SEV_CMD_LAUNCH_UPDATE_SECRET: 
returnsizeof(struct sev_data_launch_secret); case SEV_CMD_DOWNLOAD_FIRMWARE: returnsizeof(struct sev_data_download_firmware); case SEV_CMD_GET_ID: returnsizeof(struct sev_data_get_id); case SEV_CMD_ATTESTATION_REPORT: returnsizeof(struct sev_data_attestation_report); case SEV_CMD_SEND_CANCEL: returnsizeof(struct sev_data_send_cancel); case SEV_CMD_SNP_GCTX_CREATE: returnsizeof(struct sev_data_snp_addr); case SEV_CMD_SNP_LAUNCH_START: returnsizeof(struct sev_data_snp_launch_start); case SEV_CMD_SNP_LAUNCH_UPDATE: returnsizeof(struct sev_data_snp_launch_update); case SEV_CMD_SNP_ACTIVATE: returnsizeof(struct sev_data_snp_activate); case SEV_CMD_SNP_DECOMMISSION: returnsizeof(struct sev_data_snp_addr); case SEV_CMD_SNP_PAGE_RECLAIM: returnsizeof(struct sev_data_snp_page_reclaim); case SEV_CMD_SNP_GUEST_STATUS: returnsizeof(struct sev_data_snp_guest_status); case SEV_CMD_SNP_LAUNCH_FINISH: returnsizeof(struct sev_data_snp_launch_finish); case SEV_CMD_SNP_DBG_DECRYPT: returnsizeof(struct sev_data_snp_dbg); case SEV_CMD_SNP_DBG_ENCRYPT: returnsizeof(struct sev_data_snp_dbg); case SEV_CMD_SNP_PAGE_UNSMASH: returnsizeof(struct sev_data_snp_page_unsmash); case SEV_CMD_SNP_PLATFORM_STATUS: returnsizeof(struct sev_data_snp_addr); case SEV_CMD_SNP_GUEST_REQUEST: returnsizeof(struct sev_data_snp_guest_request); case SEV_CMD_SNP_CONFIG: returnsizeof(struct sev_user_data_snp_config); case SEV_CMD_SNP_COMMIT: returnsizeof(struct sev_data_snp_commit); default: return 0;
}
fp = open_file_as_root(init_ex_path, O_RDONLY, 0); if (IS_ERR(fp)) { int ret = PTR_ERR(fp);
if (ret == -ENOENT) {
dev_info(sev->dev, "SEV: %s does not exist and will be created later.\n",
init_ex_path);
ret = 0;
} else {
dev_err(sev->dev, "SEV: could not open %s for read, error %d\n",
init_ex_path, ret);
} return ret;
}
nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL); if (nread != NV_LENGTH) {
dev_info(sev->dev, "SEV: could not read %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nread);
}
if (nwrite != NV_LENGTH) {
dev_err(sev->dev, "SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nwrite); return -EIO;
}
dev_dbg(sev->dev, "SEV: write successful to NV file\n");
/* * Only a few platform commands modify the SPI/NV area, but none of the * non-platform commands do. Only INIT(_EX), PLATFORM_RESET, PEK_GEN, * PEK_CERT_IMPORT, and PDH_GEN do.
*/ switch (cmd_id) { case SEV_CMD_FACTORY_RESET: case SEV_CMD_INIT_EX: case SEV_CMD_PDH_GEN: case SEV_CMD_PEK_CERT_IMPORT: case SEV_CMD_PEK_GEN: break; default: return 0;
}
return sev_write_init_ex_file();
}
/* * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked() * needs snp_reclaim_pages(), so a forward declaration is needed.
*/ staticint __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
for (i = 0; i < npages; i++, paddr += PAGE_SIZE) { struct sev_data_snp_page_reclaim data = {0};
data.paddr = paddr;
if (locked)
ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err); else
ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
if (ret) goto cleanup;
ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K); if (ret) goto cleanup;
}
return 0;
cleanup: /* * If there was a failure reclaiming the page then it is no longer safe * to release it back to the system; leak it instead.
*/
snp_leak_pages(__phys_to_pfn(paddr), npages - i); return ret;
}
for (i = 0; i < npages; i++, pfn++) {
rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true); if (rc) goto cleanup;
}
return 0;
cleanup: /* * Try unrolling the firmware state changes by * reclaiming the pages which were already changed to the * firmware state.
*/
snp_reclaim_pages(paddr, i, locked);
page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true); if (!page) return NULL;
return page_address(page);
}
/**
 * struct cmd_buf_desc - tracks one firmware-writable buffer address found in
 *	a legacy SEV command buffer.
 *
 * @paddr_ptr:   pointer into the command buffer at the address parameter that
 *		 may need to be swapped for a bounce buffer allocated by
 *		 snp_map_cmd_buf_desc(), and restored afterwards.  A NULL
 *		 value marks this descriptor as the end-of-list indicator.
 * @paddr_orig:  saved copy of the original address parameter, used to restore
 *		 *@paddr_ptr when a bounce buffer was substituted.
 * @len:         length in bytes of the buffer originally addressed through
 *		 @paddr_ptr.
 * @guest_owned: true when the pages behind the address are guest-owned; no
 *		 bounce buffer is required in that case.
 */
struct cmd_buf_desc {
	u64 *paddr_ptr;
	u64 paddr_orig;
	u32 len;
	bool guest_owned;
};
/* * If a legacy SEV command parameter is a memory address, those pages in * turn need to be transitioned to/from firmware-owned before/after * executing the firmware command. * * Additionally, in cases where those pages are not guest-owned, a bounce * buffer is needed in place of the original memory address parameter. * * A set of descriptors are used to keep track of this handling, and * initialized here based on the specific commands being executed.
*/ staticvoid snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
{ switch (cmd) { case SEV_CMD_PDH_CERT_EXPORT: { struct sev_data_pdh_cert_export *data = cmd_buf;
/* Allocate a bounce buffer if this isn't a guest owned page. */ if (!desc->guest_owned) { struct page *page;
page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len)); if (!page) {
pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n"); return -ENOMEM;
}
/* Transition the buffer to firmware-owned. */ if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) {
pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n"); return -EFAULT;
}
/* Transition the buffers back to hypervisor-owned. */ if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) {
pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n"); return -EFAULT;
}
/* Copy data from bounce buffer and then free it. */ if (!desc->guest_owned) { void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr)); void *dst_buf = __va(__sme_clr(desc->paddr_orig));
for (i = 0; i < CMD_BUF_DESC_MAX; i++) { struct cmd_buf_desc *desc = &desc_list[i];
if (!desc->paddr_ptr) break;
if (snp_map_cmd_buf_desc(desc)) goto err_unmap;
}
return 0;
err_unmap: for (i--; i >= 0; i--)
snp_unmap_cmd_buf_desc(&desc_list[i]);
return -EFAULT;
}
staticint snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list)
{ int i, ret = 0;
for (i = 0; i < CMD_BUF_DESC_MAX; i++) { struct cmd_buf_desc *desc = &desc_list[i];
if (!desc->paddr_ptr) break;
if (snp_unmap_cmd_buf_desc(&desc_list[i]))
ret = -EFAULT;
}
return ret;
}
staticbool sev_cmd_buf_writable(int cmd)
{ switch (cmd) { case SEV_CMD_PLATFORM_STATUS: case SEV_CMD_GUEST_STATUS: case SEV_CMD_LAUNCH_START: case SEV_CMD_RECEIVE_START: case SEV_CMD_LAUNCH_MEASURE: case SEV_CMD_SEND_START: case SEV_CMD_SEND_UPDATE_DATA: case SEV_CMD_SEND_UPDATE_VMSA: case SEV_CMD_PEK_CSR: case SEV_CMD_PDH_CERT_EXPORT: case SEV_CMD_GET_ID: case SEV_CMD_ATTESTATION_REPORT: returntrue; default: returnfalse;
}
}
/* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */ staticbool snp_legacy_handling_needed(int cmd)
{ struct sev_device *sev = psp_master->sev_data;
if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list)) return -EFAULT;
/* * Before command execution, the command buffer needs to be put into * the firmware-owned state.
*/ if (sev_cmd_buf_writable(cmd)) { if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true)) return -EFAULT;
}
return 0;
}
staticint snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
{ if (!snp_legacy_handling_needed(cmd)) return 0;
/* * After command completion, the command buffer needs to be put back * into the hypervisor-owned state.
*/ if (sev_cmd_buf_writable(cmd)) if (snp_reclaim_pages(__pa(cmd_buf), 1, true)) return -EFAULT;
buf_len = sev_cmd_buffer_len(cmd); if (WARN_ON_ONCE(!data != !buf_len)) return -EINVAL;
/* * Copy the incoming data to driver's scratch buffer as __pa() will not * work for some memory, e.g. vmalloc'd addresses, and @data may not be * physically contiguous.
*/ if (data) { /* * Commands are generally issued one at a time and require the * sev_cmd_mutex, but there could be recursive firmware requests * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while * preparing buffers for another command. This is the only known * case of nesting in the current code, so exactly one * additional command buffer is available for that purpose.
*/ if (!sev->cmd_buf_active) {
cmd_buf = sev->cmd_buf;
sev->cmd_buf_active = true;
} elseif (!sev->cmd_buf_backup_active) {
cmd_buf = sev->cmd_buf_backup;
sev->cmd_buf_backup_active = true;
} else {
dev_err(sev->dev, "SEV: too many firmware commands in progress, no command buffers available.\n"); return -EBUSY;
}
memcpy(cmd_buf, data, buf_len);
/* * The behavior of the SEV-legacy commands is altered when the * SNP firmware is in the INIT state.
*/
ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list); if (ret) {
dev_err(sev->dev, "SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n",
cmd, ret); return ret;
}
} else {
cmd_buf = sev->cmd_buf;
}
/* Get the physical address of the command buffer */
phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0;
phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0;
/* * If invoked during panic handling, local interrupts are disabled so * the PSP command completion interrupt can't be used. * sev_wait_cmd_ioc() already checks for interrupts disabled and * polls for PSP command completion. Ensure we do not request an * interrupt from the PSP if irqs disabled.
*/ if (!irqs_disabled())
reg |= SEV_CMDRESP_IOC;
/* * PSP firmware may report additional error information in the * command buffer registers on error. Print contents of command * buffer registers if they changed.
*/
cmdbuff_hi = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
cmdbuff_lo = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); if (cmdbuff_hi != phys_msb || cmdbuff_lo != phys_lsb) {
dev_dbg(sev->dev, "Additional error information reported in cmdbuff:");
dev_dbg(sev->dev, " cmdbuff hi: %#010x\n", cmdbuff_hi);
dev_dbg(sev->dev, " cmdbuff lo: %#010x\n", cmdbuff_lo);
}
ret = -EIO;
} else {
ret = sev_write_init_ex_file_if_required(cmd);
}
/* * Copy potential output from the PSP back to data. Do this even on * failure in case the caller wants to glean something from the error.
*/ if (data) { int ret_reclaim; /* * Restore the page state after the command completes.
*/
ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf); if (ret_reclaim) {
dev_err(sev->dev, "SEV: failed to reclaim buffer for legacy command %#x. Error: %d\n",
cmd, ret_reclaim); return ret_reclaim;
}
memcpy(data, cmd_buf, buf_len);
if (sev->cmd_buf_backup_active)
sev->cmd_buf_backup_active = false; else
sev->cmd_buf_active = false;
if (snp_unmap_cmd_buf_desc_list(desc_list)) return -EFAULT;
}
memset(&data, 0, sizeof(data)); if (sev_es_tmr) { /* * Do not include the encryption mask on the physical * address of the TMR (firmware should clear it anyway).
*/
data.tmr_address = __pa(sev_es_tmr);
if (sev_es_tmr) { /* * Do not include the encryption mask on the physical * address of the TMR (firmware should clear it anyway).
*/
data.tmr_address = __pa(sev_es_tmr);
/* * Ensure the list of HV_FIXED pages that will be passed to firmware * do not exceed the page-sized argument buffer.
*/ if ((range_list->num_elements * sizeof(struct sev_data_range) + sizeof(struct sev_data_range_list)) > PAGE_SIZE) return -E2BIG;
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) return -ENODEV;
sev = psp->sev_data;
if (sev->snp_initialized) return 0;
if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR); return -EOPNOTSUPP;
}
/* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
on_each_cpu(snp_set_hsave_pa, NULL, 1);
/* * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list * of system physical address ranges to convert into HV-fixed page * states during the RMP initialization. For instance, the memory that * UEFI reserves should be included in the that list. This allows system * components that occasionally write to memory (e.g. logging to UEFI * reserved regions) to not fail due to RMP initialization and SNP * enablement. *
*/ if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) { /* * Firmware checks that the pages containing the ranges enumerated * in the RANGES structure are either in the default page state or in the * firmware page state.
*/
snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!snp_range_list) {
dev_err(sev->dev, "SEV: SNP_INIT_EX range list memory allocation failed\n"); return -ENOMEM;
}
/* * Retrieve all reserved memory regions from the e820 memory map * to be setup as HV-fixed pages.
*/
rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0,
snp_range_list, snp_filter_reserved_mem_regions); if (rc) {
dev_err(sev->dev, "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc); return rc;
}
/* * The following sequence must be issued before launching the first SNP * guest to ensure all dirty cache lines are flushed, including from * updates to the RMP table itself via the RMPUPDATE instruction: * * - WBINVD on all running CPUs * - SEV_CMD_SNP_INIT[_EX] firmware command * - WBINVD on all running CPUs * - SEV_CMD_SNP_DF_FLUSH firmware command
*/
wbinvd_on_all_cpus();
/*
 * Allocate the Trusted Memory Region used for SEV-ES, unless it already
 * exists.  Allocation failure is non-fatal: a warning is logged and SEV-ES
 * support is simply unavailable.
 */
static void __sev_platform_init_handle_tmr(struct sev_device *sev)
{
	if (sev_es_tmr)
		return;

	/* Obtain the TMR memory area for SEV-ES use */
	sev_es_tmr = sev_fw_alloc(sev_es_tmr_size);
	if (!sev_es_tmr) {
		dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n");
		return;
	}

	/*
	 * Must flush the cache before giving the TMR to the firmware.
	 * NOTE(review): the flush is skipped when SNP is initialized --
	 * presumably coherency is handled elsewhere on the SNP path; confirm.
	 */
	if (!sev->snp_initialized)
		clflush_cache_range(sev_es_tmr, sev_es_tmr_size);
}
/* * If an init_ex_path is provided allocate a buffer for the file and * read in the contents. Additionally, if SNP is initialized, convert * the buffer pages to firmware pages.
*/ staticint __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
{ struct page *page; int rc;
if (!psp_master || !psp_master->sev_data) return -ENODEV;
sev = psp_master->sev_data;
if (sev->state == SEV_STATE_INIT) return 0;
__sev_platform_init_handle_tmr(sev);
rc = __sev_platform_init_handle_init_ex_path(sev); if (rc) return rc;
rc = __sev_do_init_locked(&psp_ret); if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { /* * Initialization command returned an integrity check failure * status code, meaning that firmware load and validation of SEV * related persistent data has failed. Retrying the * initialization function should succeed by replacing the state * with a reset state.
*/
dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
rc = __sev_do_init_locked(&psp_ret);
}
staticint _sev_platform_init_locked(struct sev_platform_init_args *args)
{ struct sev_device *sev; int rc;
if (!psp_master || !psp_master->sev_data) return -ENODEV;
/* * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP * may already be initialized in the previous kernel. Since no * SNP/SEV guests are run under a kdump kernel, there is no * need to initialize SNP or SEV during kdump boot.
*/ if (is_kdump_kernel()) return 0;
staticint sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{ int state, rc;
if (!writable) return -EPERM;
/* * The SEV spec requires that FACTORY_RESET must be issued in * UNINIT state. Before we go further lets check if any guest is * active. * * If FW is in WORKING state then deny the request otherwise issue * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET. *
*/
rc = sev_get_platform_state(&state, &argp->error); if (rc) return rc;
if (state == SEV_STATE_WORKING) return -EBUSY;
if (state == SEV_STATE_INIT) {
rc = __sev_platform_shutdown_locked(&argp->error); if (rc) return rc;
}
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT;
memset(&data, 0, sizeof(data));
/* userspace wants to query CSR length */ if (!input.address || !input.length) goto cmd;
/* allocate a physically contiguous buffer to store the CSR blob */
input_address = (void __user *)input.address; if (input.length > SEV_FW_BLOB_MAX_SIZE) return -EFAULT;
blob = kzalloc(input.length, GFP_KERNEL); if (!blob) return -ENOMEM;
/* Check for SEV FW for a particular model. * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h * * or * * Check for SEV FW common to a subset of models. * Ex. amd_sev_fam17h_model0xh.sbin for * Family 17h Model 00h -- Family 17h Model 0Fh * * or * * Fall-back to using generic name: sev.fw
*/ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
(firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
(firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0)) return 0;
return -ENOENT;
}
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ staticint sev_update_firmware(struct device *dev)
{ struct sev_data_download_firmware *data; conststruct firmware *firmware; int ret, error, order; struct page *p;
u64 data_size;
if (!sev_version_greater_or_equal(0, 15)) {
dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); return -1;
}
if (sev_get_firmware(dev, &firmware) == -ENOENT) {
dev_dbg(dev, "No SEV firmware file present\n"); return -1;
}
/* * SEV FW expects the physical address given to it to be 32 * byte aligned. Memory allocated has structure placed at the * beginning followed by the firmware being passed to the SEV * FW. Allocate enough memory for data structure + alignment * padding + SEV FW.
*/
data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
order = get_order(firmware->size + data_size);
p = alloc_pages(GFP_KERNEL, order); if (!p) {
ret = -1; goto fw_err;
}
/* * Copy firmware data to a kernel allocated contiguous * memory region.
*/
data = page_address(p);
memcpy(page_address(p) + data_size, firmware->data, firmware->size);
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
/* * A quirk for fixing the committed TCB version, when upgrading from * earlier firmware version than 1.50.
*/ if (!ret && !sev_version_greater_or_equal(1, 50))
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
/* * If invoked during panic handling, local interrupts are disabled * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called. * In that case, a wbinvd() is done on remote CPUs via the NMI * callback, so only a local wbinvd() is needed here.
*/ if (!panic)
wbinvd_on_all_cpus(); else
wbinvd();
ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error); /* SHUTDOWN may require DF_FLUSH */ if (*error == SEV_RET_DFFLUSH_REQUIRED) { int dfflush_error = SEV_RET_NO_FW_CALL;
ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error); if (ret) {
dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
ret, dfflush_error); return ret;
} /* reissue the shutdown command */
ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data,
error);
} if (ret) {
dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
ret, *error); return ret;
}
/* * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP * enforcement by the IOMMU and also transitions all pages * associated with the IOMMU to the Reclaim state. * Firmware was transitioning the IOMMU pages to Hypervisor state * before version 1.53. But, accounting for the number of assigned * 4kB pages in a 2M page was done incorrectly by not transitioning * to the Reclaim state. This resulted in RMP #PF when later accessing * the 2M page containing those pages during kexec boot. Hence, the * firmware now transitions these pages to Reclaim state and hypervisor * needs to transition these pages to shared state. SNP Firmware * version 1.53 and above are needed for kexec boot.
*/
ret = amd_iommu_snp_disable(); if (ret) {
dev_err(sev->dev, "SNP IOMMU shutdown failed\n"); return ret;
}
/* * __sev_snp_shutdown_locked() deadlocks when it tries to unregister * itself during panic as the panic notifier is called with RCU read * lock held and notifier unregistration does RCU synchronization.
*/ if (!panic)
atomic_notifier_chain_unregister(&panic_notifier_list,
&snp_panic_notifier);
/* Reset TMR size back to default */
sev_es_tmr_size = SEV_TMR_SIZE;
/* If platform is not in INIT state then transition it to INIT */ if (sev->state != SEV_STATE_INIT) {
ret = sev_move_to_init_state(argp, &shutdown_required); if (ret) goto e_free_oca;
}
ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca: if (shutdown_required)
__sev_firmware_shutdown(sev, false);
/* SEV GET_ID is available from SEV API v0.16 and up */ if (!sev_version_greater_or_equal(0, 16)) return -ENOTSUPP;
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT;
input_address = (void __user *)input.address;
if (input.address && input.length) { /* * The length of the ID shouldn't be assumed by software since * it may change in the future. The allocation size is limited * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator. * If the allocation fails, simply return ENOMEM rather than * warning in the kernel log.
*/
id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN); if (!id_blob) return -ENOMEM;
ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error);
/* * Firmware will return the length of the ID value (either the minimum * required length or the actual length written), return it to the user.
*/
input.length = data.len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT; goto e_free;
}
if (id_blob) { if (copy_to_user(input_address, id_blob, data.len)) {
ret = -EFAULT; goto e_free;
}
}
/* SEV GET_ID available from SEV API v0.16 and up */ if (!sev_version_greater_or_equal(0, 16)) return -ENOTSUPP;
/* SEV FW expects the buffer it fills with the ID to be * 8-byte aligned. Memory allocated should be enough to * hold data structure + alignment padding + memory * where SEV FW writes the ID.
*/
data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
user_size = sizeof(struct sev_user_data_get_id);
mem = kzalloc(data_size + user_size, GFP_KERNEL); if (!mem) return -ENOMEM;
ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); if (!ret) { if (copy_to_user((void __user *)argp->data, id_blob, data->len))
ret = -EFAULT;
}
cmd: /* If platform is not in INIT state then transition it to INIT. */ if (sev->state != SEV_STATE_INIT) { if (!writable) {
ret = -EPERM; goto e_free_cert;
}
ret = sev_move_to_init_state(argp, &shutdown_required); if (ret) goto e_free_cert;
}
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
input.cert_chain_len = data.cert_chain_len;
input.pdh_cert_len = data.pdh_cert_len;
if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT; goto e_free_cert;
}
if (pdh_blob) { if (copy_to_user(input_pdh_cert_address,
pdh_blob, input.pdh_cert_len)) {
ret = -EFAULT; goto e_free_cert;
}
}
if (cert_blob) { if (copy_to_user(input_cert_chain_address,
cert_blob, input.cert_chain_len))
ret = -EFAULT;
}
e_free_cert: if (shutdown_required)
__sev_firmware_shutdown(sev, false);
status_page = alloc_page(GFP_KERNEL_ACCOUNT); if (!status_page) return -ENOMEM;
data = page_address(status_page);
if (!sev->snp_initialized) {
ret = snp_move_to_init_state(argp, &shutdown_required); if (ret) goto cleanup;
}
/* * Firmware expects status page to be in firmware-owned state, otherwise * it will report firmware error code INVALID_PAGE_STATE (0x1A).
*/ if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
ret = -EFAULT; goto cleanup;
}
buf.address = __psp_pa(data);
ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error);
/* * Status page will be transitioned to Reclaim state upon success, or * left in Firmware state in failure. Use snp_reclaim_pages() to * transition either case back to Hypervisor-owned state.
*/ if (snp_reclaim_pages(__pa(data), 1, true)) return -EFAULT;
if (ret) goto cleanup;
if (copy_to_user((void __user *)argp->data, data, sizeof(struct sev_user_data_snp_status)))
ret = -EFAULT;
cleanup: if (shutdown_required)
__sev_snp_shutdown_locked(&error, false);
if (!psp_master || !psp_master->sev_data) return -ENODEV;
if (ioctl != SEV_ISSUE_CMD) return -EINVAL;
if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) return -EFAULT;
if (input.cmd > SEV_MAX) return -EINVAL;
mutex_lock(&sev_cmd_mutex);
switch (input.cmd) {
case SEV_FACTORY_RESET:
ret = sev_ioctl_do_reset(&input, writable); break; case SEV_PLATFORM_STATUS:
ret = sev_ioctl_do_platform_status(&input); break; case SEV_PEK_GEN:
ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable); break; case SEV_PDH_GEN:
ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable); break; case SEV_PEK_CSR:
ret = sev_ioctl_do_pek_csr(&input, writable); break; case SEV_PEK_CERT_IMPORT:
ret = sev_ioctl_do_pek_import(&input, writable); break; case SEV_PDH_CERT_EXPORT:
ret = sev_ioctl_do_pdh_export(&input, writable); break; case SEV_GET_ID:
pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
ret = sev_ioctl_do_get_id(&input); break; case SEV_GET_ID2:
ret = sev_ioctl_do_get_id2(&input); break; case SNP_PLATFORM_STATUS:
ret = sev_ioctl_do_snp_platform_status(&input); break; case SNP_COMMIT:
ret = sev_ioctl_do_snp_commit(&input); break; case SNP_SET_CONFIG:
ret = sev_ioctl_do_snp_set_config(&input, writable); break; case SNP_VLEK_LOAD:
ret = sev_ioctl_do_snp_vlek_load(&input, writable); break; default:
ret = -EINVAL; goto out;
}
if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
ret = -EFAULT;
out:
mutex_unlock(&sev_cmd_mutex);
/* * SEV feature support can be detected on multiple devices but the SEV * FW commands must be issued on the master. During probe, we do not * know the master hence we create /dev/sev on the first device probe. * sev_do_cmd() finds the right master device to which to issue the * command to the firmware.
*/ if (!misc_dev) { struct miscdevice *misc;
misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL); if (!misc_dev) return -ENOMEM;
staticvoid __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{ int error;
__sev_platform_shutdown_locked(&error);
if (sev_es_tmr) { /* * The TMR area was encrypted, flush it from the cache. * * If invoked during panic handling, local interrupts are * disabled and all CPUs are stopped, so wbinvd_on_all_cpus() * can't be used. In that case, wbinvd() is done on remote CPUs * via the NMI callback, and done for this CPU later during * SNP shutdown, so wbinvd_on_all_cpus() can be skipped.
*/ if (!panic)
wbinvd_on_all_cpus();
/* * If sev_cmd_mutex is already acquired, then it's likely * another PSP command is in flight and issuing a shutdown * would fail in unexpected ways. Rather than create even * more confusion during a panic, just bail out here.
*/ if (mutex_is_locked(&sev_cmd_mutex)) return NOTIFY_DONE;
__sev_firmware_shutdown(sev, true);
return NOTIFY_DONE;
}
int sev_issue_cmd_external_user(struct file *filep, unsignedint cmd, void *data, int *error)
{ if (!filep || filep->f_op != &sev_fops) return -EBADF;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.