/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate device's register interface and map them.
 *  - Registers nvdimm bridge device with cxl_core.
 *  - Registers a CXL mailbox with cxl_core.
 */
/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long, its longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");
if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { /* Check again in case preempted before timeout test */ if (!cxl_doorbell_busy(cxlds)) break; return -ETIMEDOUT;
}
cpu_relax();
}
/*
 * Threaded irq dev_id's must be globally unique. cxl_dev_id provides a unique
 * wrapper object for each irq within the same cxlds.
 */
struct cxl_dev_id {
	struct cxl_dev_state *cxlds;	/* back-pointer shared by all irqs of this device */
};
/** * __cxl_pci_mbox_send_cmd() - Execute a mailbox command * @cxl_mbox: CXL mailbox context * @mbox_cmd: Command to send to the memory device. * * Context: Any context. Expects mbox_mutex to be held. * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success. * Caller should check the return code in @mbox_cmd to make sure it * succeeded. * * This is a generic form of the CXL mailbox send command thus only using the * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory * devices, and perhaps other types of CXL devices may have further information * available upon error conditions. Driver facilities wishing to send mailbox * commands should use the wrapper command. * * The CXL spec allows for up to two mailboxes. The intention is for the primary * mailbox to be OS controlled and the secondary mailbox to be used by system * firmware. This allows the OS and firmware to communicate with the device and * not need to coordinate with each other. The driver only uses the primary * mailbox.
*/ staticint __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *mbox_cmd)
{ struct cxl_dev_state *cxlds = mbox_to_cxlds(cxl_mbox); struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; struct device *dev = cxlds->dev;
u64 cmd_reg, status_reg;
size_t out_len; int rc;
lockdep_assert_held(&cxl_mbox->mbox_mutex);
/* * Here are the steps from 8.2.8.4 of the CXL 2.0 spec. * 1. Caller reads MB Control Register to verify doorbell is clear * 2. Caller writes Command Register * 3. Caller writes Command Payload Registers if input payload is non-empty * 4. Caller writes MB Control Register to set doorbell * 5. Caller either polls for doorbell to be clear or waits for interrupt if configured * 6. Caller reads MB Status Register to fetch Return code * 7. If command successful, Caller reads Command Register to get Payload Length * 8. If output payload is non-empty, host reads Command Payload Registers * * Hardware is free to do whatever it wants before the doorbell is rung, * and isn't allowed to change anything after it clears the doorbell. As * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can * also happen in any order (though some orders might not make sense).
*/
/* * With sanitize polling, hardware might be done and the poller still * not be in sync. Ensure no new command comes in until so. Keep the * hardware semantics and only allow device health status.
*/ if (mds->security.poll_tmo_secs > 0) { if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO) return -EBUSY;
}
cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
mbox_cmd->opcode); if (mbox_cmd->size_in) { if (WARN_ON(!mbox_cmd->payload_in)) return -EINVAL;
/* * Handle the background command in a synchronous manner. * * All other mailbox commands will serialize/queue on the mbox_mutex, * which we currently hold. Furthermore this also guarantees that * cxl_mbox_background_complete() checks are safe amongst each other, * in that no new bg operation can occur in between. * * Background operations are timesliced in accordance with the nature * of the command. In the event of timeout, the mailbox state is * indeterminate until the next successful command submission and the * driver can get back in sync with the hardware state.
*/ if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
u64 bg_status_reg; int i, timeout;
/* * Sanitization is a special case which monopolizes the device * and cannot be timesliced. Handle asynchronously instead, * and allow userspace to poll(2) for completion.
*/ if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) { if (mds->security.sanitize_active) return -EBUSY;
/* give first timeout a second */
timeout = 1;
mds->security.poll_tmo_secs = timeout;
mds->security.sanitize_active = true;
schedule_delayed_work(&mds->security.poll_dwork,
timeout * HZ);
dev_dbg(dev, "Sanitization operation started\n"); goto success;
}
if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
dev_dbg(dev, "Mailbox operation had an error: %s\n",
cxl_mbox_cmd_rc2str(mbox_cmd)); return 0; /* completed but caller must check return_code */
}
/* #8 */ if (out_len && mbox_cmd->payload_out) { /* * Sanitize the copy. If hardware misbehaves, out_len per the * spec can actually be greater than the max allowed size (21 * bits available but spec defined 1M max). The caller also may * have requested less data than the hardware supplied even * within spec.
*/
size_t n;
/* * A command may be in flight from a previous driver instance, * think kexec, do one doorbell wait so that * __cxl_pci_mbox_send_cmd() can assume that it is the only * source for future doorbell busy events.
*/ if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
cxl_err(dev, md_status, "timeout awaiting mailbox idle"); return -ETIMEDOUT;
}
/* * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register * * If the size is too small, mandatory commands will not work and so * there's no point in going forward. If the size is too large, there's * no harm is soft limiting it.
*/
cxl_mbox->payload_size = min_t(size_t, cxl_mbox->payload_size, SZ_1M); if (cxl_mbox->payload_size < 256) {
dev_err(dev, "Mailbox is too small (%zub)",
cxl_mbox->payload_size); return -ENXIO;
}
/* * Assume that any RCIEP that emits the CXL memory expander class code * is an RCD
*/ staticbool is_cxl_restricted(struct pci_dev *pdev)
{ return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}
/* * If the Register Locator DVSEC does not exist, check if it * is an RCH and try to extract the Component Registers from * an RCRB.
*/ if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) { struct cxl_dport *dport; struct cxl_port *port __free(put_cxl_port) =
cxl_pci_find_port(pdev, &dport); if (!port) return -EPROBE_DEFER;
rc = cxl_rcrb_get_comp_regs(pdev, map, dport); if (rc) return rc;
rc = cxl_dport_map_rcd_linkcap(pdev, dport); if (rc) return rc;
/* * There is a single buffer for reading event logs from the mailbox. All logs * share this buffer protected by the mds->event_log_lock.
*/ staticint cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
{ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_get_event_payload *buf;
staticbool cxl_alloc_irq_vectors(struct pci_dev *pdev)
{ int nvecs;
/* * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must * not generate INTx messages if that function participates in * CXL.cache or CXL.mem. * * Additionally pci_alloc_irq_vectors() handles calling * pci_free_irq_vectors() automatically despite not being called * pcim_*. See pci_setup_msi_context().
*/
nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
PCI_IRQ_MSIX | PCI_IRQ_MSI); if (nvecs < 1) {
dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs); returnfalse;
} returntrue;
}
do { /* * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status; * ignore the reserved upper 32 bits
*/
status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET); /* Ignore logs unknown to the driver */
status &= CXLDEV_EVENT_STATUS_ALL; if (!status) break;
cxl_mem_get_event_records(mds, status);
cond_resched();
} while (status);
/* * When BIOS maintains CXL error reporting control, it will process * event records. Only one agent can do so.
*/ if (!host_bridge->native_cxl_error) return 0;
if (!irq_avail) {
dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n"); return 0;
}
rc = cxl_event_get_int_policy(mds, &policy); if (rc) return rc;
if (cxl_event_int_is_fw(policy.info_settings) ||
cxl_event_int_is_fw(policy.warn_settings) ||
cxl_event_int_is_fw(policy.failure_settings) ||
cxl_event_int_is_fw(policy.fatal_settings)) {
dev_err(mds->cxlds.dev, "FW still in control of Event Logs despite _OSC settings\n"); return -EBUSY;
}
rc = cxl_mem_alloc_event_buf(mds); if (rc) return rc;
rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map); if (rc) return rc;
rc = cxl_map_device_regs(&map, &cxlds->regs.device_regs); if (rc) return rc;
/* * If the component registers can't be found, the cxl_pci driver may * still be useful for management functions so don't return an error.
*/
rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
&cxlds->reg_map); if (rc)
dev_warn(&pdev->dev, "No component registers (%d)\n", rc); elseif (!cxlds->reg_map.component_map.ras.valid)
dev_dbg(&pdev->dev, "RAS registers not found\n");
rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
BIT(CXL_CM_CAP_CAP_ID_RAS)); if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
rc = cxl_pci_type3_init_mailbox(cxlds); if (rc) return rc;
rc = cxl_await_media_ready(cxlds); if (rc == 0)
cxlds->media_ready = true; else
dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
irq_avail = cxl_alloc_irq_vectors(pdev);
rc = cxl_pci_setup_mailbox(mds, irq_avail); if (rc) return rc;
rc = cxl_enumerate_cmds(mds); if (rc) return rc;
rc = cxl_set_timestamp(mds); if (rc) return rc;
rc = cxl_poison_state_init(mds); if (rc) return rc;
rc = cxl_dev_state_identify(mds); if (rc) return rc;
rc = cxl_mem_dpa_fetch(mds, &range_info); if (rc) return rc;
rc = cxl_dpa_setup(cxlds, &range_info); if (rc) return rc;
rc = devm_cxl_setup_features(cxlds); if (rc)
dev_dbg(&pdev->dev, "No CXL Features discovered\n");
cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd);
rc = devm_cxl_setup_fw_upload(&pdev->dev, mds); if (rc) return rc;
rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd); if (rc) return rc;
rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd); if (rc)
dev_dbg(&pdev->dev, "No CXL FWCTL setup\n");
pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU); if (pmu_count < 0) return pmu_count;
for (i = 0; i < pmu_count; i++) { struct cxl_pmu_regs pmu_regs;
rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU, &map, i); if (rc) {
dev_dbg(&pdev->dev, "Could not find PMU regblock\n"); break;
}
rc = cxl_map_pmu_regs(&map, &pmu_regs); if (rc) {
dev_dbg(&pdev->dev, "Could not map PMU regs\n"); break;
}
rc = devm_cxl_pmu_add(cxlds->dev, &pmu_regs, cxlmd->id, i, CXL_PMU_MEMDEV); if (rc) {
dev_dbg(&pdev->dev, "Could not add PMU instance\n"); break;
}
}
rc = cxl_event_config(host_bridge, mds, irq_avail); if (rc) return rc;
if (cxl_pci_ras_unmask(pdev))
dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
pci_save_state(pdev);
return rc;
}
staticconststruct pci_device_id cxl_mem_pci_tbl[] = { /* PCI class code for CXL.mem Type-3 Devices */
{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
/* * FLR does not expect to touch the HDM decoders and related * registers. SBR, however, will wipe all device configurations. * Issue a warning if there was an active decoder before the reset * that no longer exists.
*/
guard(device)(&cxlmd->dev); if (cxlmd->endpoint &&
cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
dev_crit(dev, "SBR happened without memory regions removal.\n");
dev_crit(dev, "System may be unstable if regions hosted system memory.\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
}
}
/*
 * NOTE(review): the following text is a German website disclaimer that was
 * appended to this file by the extraction tool; it is not part of the driver
 * source. Preserved here in translation, inside a comment, so the file
 * remains valid C:
 *
 * "The information on this website was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor the
 * quality of the information provided is guaranteed. Note: the colored
 * syntax display and the measurement are still experimental."
 */