/* sync with latest submission state */
mutex_lock(&cxl_mbox->mbox_mutex); if (mds->security.sanitize_active)
rc = sysfs_emit(buf, "sanitize\n");
mutex_unlock(&cxl_mbox->mbox_mutex); if (rc) return rc;
if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) return sysfs_emit(buf, "disabled\n"); if (state & CXL_PMEM_SEC_STATE_FROZEN ||
state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
state & CXL_PMEM_SEC_STATE_USER_PLIMIT) return sysfs_emit(buf, "frozen\n"); if (state & CXL_PMEM_SEC_STATE_LOCKED) return sysfs_emit(buf, "locked\n");
/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */ for (int i = 0; i < cxlds->nr_partitions; i++) { const struct resource *res = &cxlds->part[i].res;
offset = res->start;
length = resource_size(res);
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); /* * Invalid Physical Address is not an error for * volatile addresses. Device support is optional.
*/ if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
rc = 0;
} return rc;
}
/*
 * cxl_trigger_poison_list() - Collect the device's poison list
 * @cxlmd: the memdev to query
 *
 * If no decoders are committed for @cxlmd's endpoint port, query the
 * device directly; otherwise collect poison per committed endpoint
 * region. Both rwsems are taken interruptibly so a signal can abort
 * the wait.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
{
	struct cxl_port *port;
	int rc;

	port = cxlmd->endpoint;
	if (!port || !is_cxl_endpoint(port))
		return -EINVAL;

	/* Hold region and DPA state stable while walking poison records */
	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
		return rc;

	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
		return rc;

	if (cxl_num_decoders_committed(port) == 0) {
		/* No regions mapped to this memdev */
		rc = cxl_get_poison_by_memdev(cxlmd);
	} else {
		/* Regions mapped, collect poison by endpoint */
		rc = cxl_get_poison_by_endpoint(port);
	}

	return rc;
}
if (!resource_size(&cxlds->dpa_res)) {
dev_dbg(cxlds->dev, "device has no dpa resource\n"); return -EINVAL;
} if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
dpa, &cxlds->dpa_res); return -EINVAL;
} if (!IS_ALIGNED(dpa, 64)) {
dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa); return -EINVAL;
}
ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region); if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem))) return rc;
ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa); if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem))) return rc;
rc = cxl_validate_poison_dpa(cxlmd, dpa); if (rc) return rc;
/* * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command * is defined to accept 64 bytes of write-data, along with the * address to clear. This driver uses zeroes as write-data.
*/
clear = (struct cxl_mbox_clear_poison) {
.address = cpu_to_le64(dpa)
};
/** * set_exclusive_cxl_commands() - atomically disable user cxl commands * @mds: The device state to operate on * @cmds: bitmap of commands to mark exclusive * * Grab the cxl_memdev_rwsem in write mode to flush in-flight * invocations of the ioctl path and then disable future execution of * commands with the command ids set in @cmds.
 */ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds, unsigned long *cmds)
{ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
/** * clear_exclusive_cxl_commands() - atomically enable user cxl commands * @mds: The device state to modify * @cmds: bitmap of commands to mark available for userspace
 */ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds, unsigned long *cmds)
{ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
/** * cxl_mem_get_fw_info - Get Firmware info * @mds: The device data for the operation * * Retrieve firmware info for the device specified. * * Return: 0 if no error: or the result of the mailbox command. * * See CXL-3.0 8.2.9.3.1 Get FW Info
 */ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
{ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_fw_info info; struct cxl_mbox_cmd mbox_cmd; int rc;
/** * cxl_mem_activate_fw - Activate Firmware * @mds: The device data for the operation * @slot: slot number to activate * * Activate firmware in a given slot for the device specified. * * Return: 0 if no error: or the result of the mailbox command. * * See CXL-3.0 8.2.9.3.3 Activate FW
 */ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
{ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_activate_fw activate; struct cxl_mbox_cmd mbox_cmd;
if (slot == 0 || slot > mds->fw.num_slots) return -EINVAL;
/** * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer * @mds: The device data for the operation * * Abort an in-progress firmware transfer for the device specified. * * Return: 0 if no error: or the result of the mailbox command. * * See CXL-3.0 8.2.9.3.2 Transfer FW
 */ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
{ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_transfer_fw *transfer; struct cxl_mbox_cmd mbox_cmd; int rc;
transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL); if (!transfer) return -ENOMEM;
/* Set a 1s poll interval and a total wait time of 30s */
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_TRANSFER_FW,
.size_in = sizeof(*transfer),
.payload_in = transfer,
.poll_interval_ms = 1000,
.poll_count = 30,
};
if (cxl_mem_get_fw_info(mds)) return FW_UPLOAD_ERR_HW_ERROR;
/* * So far no state has been changed, hence no other cleanup is * necessary. Simply return the cancelled status.
*/ if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) return FW_UPLOAD_ERR_CANCELED;
/* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */ if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
dev_err(&cxlmd->dev, "misaligned offset for FW transfer slice (%u)\n",
offset); return FW_UPLOAD_ERR_RW_ERROR;
}
/* * Pick transfer size based on mds->payload_size; @size must be 128-byte * aligned, ->payload_size is a power of 2 starting at 256 bytes, and * sizeof(*transfer) is 128. These constraints imply that @cur_size * will always be 128b aligned.
*/
cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));
if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) return cxl_fw_do_cancel(fwl);
/* * Slot numbers are 1-indexed * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1') * Check for rollover using modulo, and 1-index it by adding 1
*/
mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
/* Do the transfer via mailbox cmd */
transfer = kzalloc(size_in, GFP_KERNEL); if (!transfer) return FW_UPLOAD_ERR_RW_ERROR;
/* * cxl_internal_send_cmd() handles background operations synchronously. * No need to wait for completions here - any errors would've been * reported and handled during the ->write() call(s). * Just check if a cancel request was received, and return success.
*/ if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) return cxl_fw_do_cancel(fwl);
cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops); if (IS_ERR(cxlmd)) return cxlmd;
dev = &cxlmd->dev;
rc = dev_set_name(dev, "mem%d", cxlmd->id); if (rc) goto err;
/* * Activate ioctl operations, no cxl_memdev_rwsem manipulation * needed as this is ordered with cdev_add() publishing the device.
*/
cxlmd->cxlds = cxlds;
cxlds->cxlmd = cxlmd;
err: /* * The cdev was briefly live, shutdown any ioctl operations that * saw that state.
*/
cxl_memdev_shutdown(dev);
put_device(dev); return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, "CXL");
/* * Prevent new irq triggered invocations of the workqueue and * flush inflight invocations.
*/
mutex_lock(&cxl_mbox->mbox_mutex);
state = mds->security.sanitize_node;
mds->security.sanitize_node = NULL;
mutex_unlock(&cxl_mbox->mbox_mutex);
if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds)) return 0;
/* * Note, the expectation is that @cxlmd would have failed to be * created if these sysfs_get_dirent calls fail.
*/
sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security"); if (!sec) return -ENOENT;
mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
sysfs_put(sec); if (!mds->security.sanitize_node) return -ENOENT;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.