/*
 * Default and maximum allowed data transfer size. For the default,
 * allow up to 128 page-sized segments. For the maximum allowed,
 * use 4 times the default (which is completely arbitrary).
 */
#define NVMET_PCI_EPF_MAX_SEGS		128
#define NVMET_PCI_EPF_MDTS_KB		\
	(NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
#define NVMET_PCI_EPF_MAX_MDTS_KB	(NVMET_PCI_EPF_MDTS_KB * 4)

/*
 * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an
 * interrupt vector to the host. This default 8 is completely arbitrary and can
 * be changed by the host with a nvme_set_features command.
 */
#define NVMET_PCI_EPF_IV_THRESHOLD	8

/*
 * BAR CC register and SQ polling intervals.
 */
#define NVMET_PCI_EPF_CC_POLL_INTERVAL	msecs_to_jiffies(10)
#define NVMET_PCI_EPF_SQ_POLL_INTERVAL	msecs_to_jiffies(5)
#define NVMET_PCI_EPF_SQ_POLL_IDLE	msecs_to_jiffies(5000)

/*
 * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
 */
#define NVMET_PCI_EPF_SQ_AB		8

/*
 * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
 * is full, in which case we retry the CQ processing after this interval.
 */
#define NVMET_PCI_EPF_CQ_RETRY_INTERVAL	msecs_to_jiffies(1)
/*
 * Per-queue state bits, stored in a queue's "flags" word and manipulated
 * with the test_bit()/set_bit() bitops family.
 */
enum nvmet_pci_epf_queue_flags {
	NVMET_PCI_EPF_Q_LIVE		= 0,	/* Queue is created and usable */
	NVMET_PCI_EPF_Q_IRQ_ENABLED	= 1,	/* Raise IRQs for this queue */
};
/*
 * NOTE(review): fragment — these read as fields of a per-command descriptor
 * structure whose definition starts outside this view (presumably
 * struct nvmet_pci_epf_iod, given the @data_seg/@data_segs usage in
 * nvmet_pci_epf_free_iod below) — TODO confirm against the full file.
 * Fused tokens such as "unsignedint" are extraction garbling and are left
 * untouched here.
 */
/* Data transfer size and direction for the command. */
size_t data_len; enum dma_data_direction dma_dir;
/* * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we * use only @data_seg. Otherwise, the array of segments @data_segs is * allocated to manage multiple PCI address data segments. @data_sgl and * @data_sgt are used to setup the command request for execution by the * target core.
 */
unsignedint nr_data_segs; struct nvmet_pci_epf_segment data_seg; struct nvmet_pci_epf_segment *data_segs; struct scatterlist data_sgl; struct sg_table data_sgt;
/*
 * NOTE(review): fragment of an MMIO data-transfer helper — the enclosing
 * function header, the "unlock" label, and the loop epilogue are outside
 * this view.
 */
/* * Note: MMIO transfers do not need serialization but this is a * simple way to avoid using too many mapping windows.
 */
mutex_lock(&nvme_epf->mmio_lock);
while (length) {
/* Map a window over the host PCI address range; stop on failure. */
ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map); if (ret) break;
/* Copy at most map.pci_size bytes per iteration, per direction. */
switch (dir) { case DMA_FROM_DEVICE:
memcpy_fromio(buf, map.virt_addr, map.pci_size); break; case DMA_TO_DEVICE:
memcpy_toio(map.virt_addr, buf, map.pci_size); break; default:
ret = -EINVAL; goto unlock;
}
/*
 * NOTE(review): fragment of the CQ interrupt-raising helper — the function
 * header and the "unlock" label/epilogue are outside this view.
 */
/* Only live CQs with IRQs enabled may signal the host. */
if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) ||
!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) return;
mutex_lock(&ctrl->irq_lock);
/* Honor the IRQ coalescing policy unless the caller forces the IRQ. */
if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force)) goto unlock;
switch (nvme_epf->irq_type) { case PCI_IRQ_MSIX: case PCI_IRQ_MSI: /* * If we fail to raise an MSI or MSI-X interrupt, it is likely * because the host is using legacy INTX IRQs (e.g. BIOS, * grub), but we can fallback to the INTX type only if the * endpoint controller supports this type.
 */
ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
nvme_epf->irq_type, cq->vector + 1); if (!ret || !nvme_epf->epc_features->intx_capable) break;
fallthrough; case PCI_IRQ_INTX:
ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
PCI_IRQ_INTX, 0); break; default:
WARN_ON_ONCE(1);
ret = -EINVAL; break;
}
if (ret)
dev_err_ratelimited(ctrl->dev, "CQ[%u]: Failed to raise IRQ (err=%d)\n",
cq->qid, ret);
/*
 * Free a command descriptor (IOD): release each segment's data buffer, the
 * segment array itself (unless the embedded single segment was used), the
 * scatter-gather table (only allocated for more than one entry), and return
 * the IOD to its controller's mempool.
 *
 * Fixes the garbled "staticvoid" token, which does not compile.
 */
static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
{
	int i;

	if (iod->data_segs) {
		for (i = 0; i < iod->nr_data_segs; i++)
			kfree(iod->data_segs[i].buf);
		/* The embedded single-segment case must not be kfree'd. */
		if (iod->data_segs != &iod->data_seg)
			kfree(iod->data_segs);
	}
	/* A single-entry table uses the embedded SGL, not sg_alloc_table(). */
	if (iod->data_sgt.nents > 1)
		sg_free_table(&iod->data_sgt);

	mempool_free(iod, &iod->ctrl->iod_pool);
}
/*
 * Transfer a command's data, one PCI address segment at a time. On a
 * segment-transfer failure, set the command status to a DNR data-transfer
 * error and return the error code; return 0 when all segments are done.
 *
 * Fixes the garbled "staticint" token, and restores the success-path
 * "return 0" that the mangled text truncated — a non-void function falling
 * off the end is undefined behavior. NOTE(review): confirm the restored
 * tail against the complete source.
 */
static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
{
	struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
	struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
	int i, ret;

	/* Split the data transfer according to the PCI segments. */
	for (i = 0; i < iod->nr_data_segs; i++, seg++) {
		ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
		if (ret) {
			iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
			return ret;
		}
	}

	return 0;
}
/*
 * NOTE(review): interleaved fragments of at least two PRP-handling
 * functions. The nvmet_pci_epf_get_prp_list() body below is truncated
 * (no return of the PRP count is visible), and from the kzalloc() line
 * onward the text jumps into the middle of a PRP-list parsing function
 * whose header, loop setup and error labels are outside this view.
 * Garbled tokens ("staticint") are left untouched here.
 */
/* * Transfer a PRP list from the host and return the number of prps.
 */
staticint nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
size_t xfer_len, __le64 *prps)
{
size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
u32 length; int ret;
/* * Compute the number of PRPs required for the number of bytes to * transfer (xfer_len). If this number overflows the memory page size * with the PRP list pointer specified, only return the space available * in the memory page, the last PRP in there will be a PRP list pointer * to the remaining PRPs.
 */
length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE); if (ret) return ret;
/*
 * NOTE(review): fragment boundary — the lines below belong to a
 * different function (a PRP-list walker); "cmd", "size", "transfer_len",
 * "i", "ofst", "pci_addr", "nr_segs" and the err_* labels are declared
 * outside this view.
 */
prps = kzalloc(ctrl->mps, GFP_KERNEL); if (!prps) goto err_internal;
/* * Allocate PCI segments for the command: this considers the worst case * scenario where all prps are discontiguous, so get as many segments * as we can have prps. In practice, most of the time, we will have * far less PCI segments than prps.
 */
prp = le64_to_cpu(cmd->common.dptr.prp1); if (!prp) goto err_invalid_field;
/* * Now build the PCI address segments using the PRP lists, starting * from prp2.
 */
prp = le64_to_cpu(cmd->common.dptr.prp2); if (!prp) goto err_invalid_field;
while (size < transfer_len) {
xfer_len = transfer_len - size;
/* Out of cached PRPs: fetch the next PRP list from the host. */
if (!nr_prps) {
nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
xfer_len, prps); if (nr_prps < 0) goto err_internal;
i = 0;
ofst = 0;
}
/* Current entry */
prp = le64_to_cpu(prps[i]); if (!prp) goto err_invalid_field;
/* Did we reach the last PRP entry of the list? */ if (xfer_len > ctrl->mps && i == nr_prps - 1) { /* We need more PRPs: PRP is a list pointer. */
nr_prps = 0; continue;
}
/* Only the first PRP is allowed to have an offset. */ if (nvmet_pci_epf_prp_ofst(ctrl, prp)) goto err_invalid_offset;
if (prp != pci_addr) { /* Discontiguous prp: new segment. */
nr_segs++; if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs)) goto err_internal;
/*
 * NOTE(review): another fragment boundary — the prp1-offset check below
 * belongs to yet another function ("prp1" is not declared above).
 */
/* Get the PCI address segments for the command using its PRPs. */
ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1); if (ofst & 0x3) {
iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; return -EINVAL;
}
/* * Transfer an SGL segment from the host and return the number of data * descriptors and the next segment descriptor, if any.
*/ staticstruct nvme_sgl_desc *
nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl, struct nvme_sgl_desc *desc, unsignedint *nr_sgls)
{ struct nvme_sgl_desc *sgls;
u32 length = le32_to_cpu(desc->length); int nr_descs, ret; void *buf;
buf = kmalloc(length, GFP_KERNEL); if (!buf) return NULL;
ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
DMA_FROM_DEVICE); if (ret) {
kfree(buf); return NULL;
}
sgls = buf;
nr_descs = length / sizeof(struct nvme_sgl_desc); if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { /* * We have another SGL segment following this one: do not count * it as a regular data SGL descriptor and return it to the * caller.
*/
*desc = sgls[nr_descs - 1];
nr_descs--;
} else { /* We do not have another SGL segment after this one. */
desc->length = 0;
}
*nr_sgls = nr_descs;
return sgls;
}
staticint nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, struct nvmet_pci_epf_iod *iod)
{ struct nvme_command *cmd = &iod->cmd; struct nvme_sgl_desc seg = cmd->common.dptr.sgl; struct nvme_sgl_desc *sgls = NULL; int n = 0, i, nr_sgls; int ret;
/* * We do not support inline data nor keyed SGLs, so we should be seeing * only segment descriptors.
*/ if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; return -EIO;
}
while (seg.length) {
sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls); if (!sgls) {
iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; return -EIO;
}
/* Grow the PCI segment table as needed. */
ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls); if (ret) {
iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto out;
}
/* * Parse the SGL descriptors to build the PCI segment table, * checking the descriptor type as we go.
*/ for (i = 0; i < nr_sgls; i++) { if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
iod->status = NVME_SC_SGL_INVALID_TYPE |
NVME_STATUS_DNR; goto out;
}
iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
n++;
}
kfree(sgls);
}
out: if (iod->status != NVME_SC_SUCCESS) {
kfree(sgls); return -EIO;
}
/*
 * NOTE(review): fragment of the command data-buffer setup function — its
 * header, the "sg"/"i"/"seg" declarations and the err_nomem label are
 * outside this view.
 */
/* * Get the PCI address segments for the command data buffer using either * its SGLs or PRPs.
 */
if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
ret = nvmet_pci_epf_iod_parse_sgls(iod); else
ret = nvmet_pci_epf_iod_parse_prps(iod); if (ret) return ret;
/* Get a command buffer using SGLs matching the PCI segments. */ if (iod->nr_data_segs == 1) {
sg_init_table(&iod->data_sgl, 1);
iod->data_sgt.sgl = &iod->data_sgl;
iod->data_sgt.nents = 1;
iod->data_sgt.orig_nents = 1;
} else {
ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
GFP_KERNEL); if (ret) goto err_nomem;
}
/* Allocate one local bounce buffer per PCI segment. */
for_each_sgtable_sg(&iod->data_sgt, sg, i) {
seg = &iod->data_segs[i];
seg->buf = kmalloc(seg->length, GFP_KERNEL); if (!seg->buf) goto err_nomem;
sg_set_buf(sg, seg->buf, seg->length);
}
/*
 * NOTE(review): fragment boundary — the lines below belong to the command
 * completion path (presumably nvmet_pci_epf_complete_iod and its caller);
 * "cq" and "flags" are declared outside this view.
 */
/* Print an error message for failed commands, except AENs. */
iod->status = le16_to_cpu(iod->cqe.status) >> 1; if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
dev_err(iod->ctrl->dev, "CQ[%d]: Command %s (0x%x) status 0x%0x\n",
iod->sq->qid, nvmet_pci_epf_iod_name(iod),
iod->cmd.common.opcode, iod->status);
/* * Add the command to the list of completed commands and schedule the * CQ work.
 */
spin_lock_irqsave(&cq->lock, flags);
list_add_tail(&iod->link, &cq->list);
queue_delayed_work(system_highpri_wq, &cq->work, 0);
spin_unlock_irqrestore(&cq->lock, flags);
}
/* * If the command failed or we have no data to transfer, complete the * command immediately.
 */
if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
nvmet_pci_epf_complete_iod(iod); return;
}
/*
 * NOTE(review): fragment of the CQ creation handler — the function header
 * and parameter declarations ("flags", "vector", "tctrl", "cqid", "qsize")
 * are outside this view.
 */
if (flags & NVME_CQ_IRQ_ENABLED) {
cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); if (!cq->iv) return NVME_SC_INTERNAL | NVME_STATUS_DNR;
set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
}
status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth); if (status != NVME_SC_SUCCESS) goto err;
/* * Map the CQ PCI address space and since PCI endpoint controllers may * return a partial mapping, check that the mapping is large enough.
 */
ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size,
&cq->pci_map); if (ret) {
dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n",
cq->qid, ret); goto err_internal;
}
if (cq->pci_map.pci_size < cq->pci_size) {
dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
cq->qid); goto err_unmap_queue;
}
set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
cqid, qsize, cq->qes, cq->vector); else
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ disabled\n",
cqid, qsize, cq->qes);
return NVME_SC_SUCCESS;
err_unmap_queue:
nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
err_internal:
status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
err: if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); return status;
}
/*
 * NOTE(review): fragment boundary — the switch cases below belong to a
 * set-features handler; "data", "irqc", "irqcfg", "iv" and "status" are
 * declared outside this view.
 */
case NVME_FEAT_IRQ_COALESCE: /* * Since we do not implement precise IRQ coalescing timing, * ignore the time field.
 */
irqc = data;
ctrl->irq_vector_threshold = irqc->thr + 1; return NVME_SC_SUCCESS;
case NVME_FEAT_IRQ_CONFIG:
irqcfg = data;
mutex_lock(&ctrl->irq_lock);
iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); if (iv) {
iv->cd = irqcfg->cd;
status = NVME_SC_SUCCESS;
} else {
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
mutex_unlock(&ctrl->irq_lock); return status;
/*
 * NOTE(review): fragment of the command execution work function — its
 * header, "req"/"ret" declarations and the "complete" label are outside
 * this view.
 */
/* * If nvmet_req_init() fails (e.g., unsupported opcode) it will call * __nvmet_req_complete() internally which will call * nvmet_pci_epf_queue_response() and will complete the command directly.
 */
if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops)) return;
iod->data_len = nvmet_req_transfer_len(req); if (iod->data_len) { /* * Get the data DMA transfer direction. Here "device" means the * PCI root-complex host.
 */
if (nvme_is_write(&iod->cmd))
iod->dma_dir = DMA_FROM_DEVICE; else
iod->dma_dir = DMA_TO_DEVICE;
/* * Setup the command data buffer and get the command data from * the host if needed.
 */
ret = nvmet_pci_epf_alloc_iod_data_buf(iod); if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
ret = nvmet_pci_epf_transfer_iod_data(iod); if (ret) {
nvmet_req_uninit(req); goto complete;
}
}
req->execute(req);
/* * If we do not have data to transfer after the command execution * finishes, nvmet_pci_epf_queue_response() will complete the command * directly. No need to wait for the completion in this case.
 */
if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) return;
/*
 * NOTE(review): fragment boundary — nvmet_pci_epf_process_sq() below is
 * truncated (its loop body and return are cut off); garbled "staticint"
 * is left untouched.
 */
staticint nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, struct nvmet_pci_epf_queue *sq)
{ struct nvmet_pci_epf_iod *iod; int ret, n = 0;
u16 head = sq->head;
/* Fetch the SQ tail doorbell and consume up to the arbitration burst. */
sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
iod = nvmet_pci_epf_alloc_iod(sq); if (!iod) break;
/* Get the NVMe command submitted by the host. */
ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
sq->pci_addr + head * sq->qes,
sq->qes, DMA_FROM_DEVICE); if (ret) { /* Not much we can do... */
nvmet_pci_epf_free_iod(iod); break;
}
/*
 * NOTE(review): truncated fragment of the SQ polling work function — the
 * loop tail and function epilogue are outside this view. Garbled tokens
 * ("staticvoid", "unsignedlong") are left untouched.
 */
staticvoid nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
{ struct nvmet_pci_epf_ctrl *ctrl =
container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work); struct nvmet_pci_epf_queue *sq; unsignedlong limit = jiffies; unsignedlong last = 0; int i, nr_sqs;
while (ctrl->link_up && ctrl->enabled) {
nr_sqs = 0; /* Do round-robin arbitration. */ for (i = 0; i < ctrl->nr_queues; i++) {
sq = &ctrl->sq[i]; if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) continue; if (nvmet_pci_epf_process_sq(ctrl, sq))
nr_sqs++;
}
/* * If we have been running for a while, reschedule to let other * tasks run and to avoid RCU stalls.
 */
if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
cond_resched();
limit = jiffies; continue;
}
if (nr_sqs) {
last = jiffies; continue;
}
/* * If we have not received any command on any queue for more * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and * reschedule. This avoids "burning" a CPU when the controller * is idle for a long time.
 */
if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE)) break;
/*
 * NOTE(review): fragment of the CQ processing work function — its header,
 * the "cqe"/"iod"/"cq"/"n"/"ret" declarations and the surrounding loop
 * are outside this view.
 */
/* * Post the IOD completion entry. If the IOD request was * executed (req->execute() called), the CQE is already * initialized. However, the IOD may have been failed before * that, leaving the CQE not properly initialized. So always * initialize it here.
 */
cqe = &iod->cqe;
cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head));
cqe->sq_id = cpu_to_le16(iod->sq->qid);
cqe->command_id = iod->cmd.common.command_id;
/* Fold the current CQ phase bit into the status field. */
cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
dev_dbg(ctrl->dev, "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
cq->phase);
/* Signal the host. */
nvmet_pci_epf_raise_irq(ctrl, cq, false);
n++;
}
/* * We do not support precise IRQ coalescing time (100ns units as per * NVMe specifications). So if we have posted completion entries without * reaching the interrupt coalescing threshold, raise an interrupt.
 */
if (n)
nvmet_pci_epf_raise_irq(ctrl, cq, true);
/* On a transient failure, retry the CQ work after a short delay. */
if (ret < 0)
queue_delayed_work(system_highpri_wq, &cq->work,
NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
}
/*
 * NOTE(review): fragments of the controller creation and endpoint-function
 * configuration/bind paths — function headers, "args", "max_nr_queues",
 * "epc_features", "reg_size" declarations and the out_* labels are outside
 * this view. Garbled "elseif" is left untouched.
 */
ctrl->tctrl = nvmet_alloc_ctrl(&args); if (!ctrl->tctrl) {
dev_err(ctrl->dev, "Failed to create target controller\n");
ret = -ENOMEM; goto out_mempool_exit;
}
ctrl->tctrl->drvdata = ctrl;
/* We do not support protection information for now. */ if (ctrl->tctrl->pi_support) {
dev_err(ctrl->dev, "Protection information (PI) is not supported\n");
ret = -ENOTSUPP; goto out_put_ctrl;
}
/* Allocate our queues, up to the maximum number. */
ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
ret = nvmet_pci_epf_alloc_queues(ctrl); if (ret) goto out_put_ctrl;
/* * Allocate the IRQ vectors descriptors. We cannot have more than the * maximum number of queues.
 */
ret = nvmet_pci_epf_alloc_irq_vectors(ctrl); if (ret) goto out_free_queues;
/*
 * NOTE(review): fragment boundary — BAR configuration of a different
 * function starts here.
 */
/* * The first free BAR will be our register BAR and per NVMe * specifications, it must be BAR 0.
 */
if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
dev_err(&epf->dev, "BAR 0 is not free\n"); return -ENODEV;
}
/* * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type * is required to be 64-bit. Thus, for interoperability, always set the * type to 64-bit. In the rare case that the PCI EPC does not support * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail, * and we will return failure back to the user.
 */
epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
/* * Calculate the size of the register bar: NVMe registers first with * enough space for the doorbells, followed by the MSI-X table * if supported.
 */
reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
reg_size = ALIGN(reg_size, 8);
if (epc_features->msix_capable) {
size_t pba_size;
/*
 * NOTE(review): fragment boundary — the MSI-X branch above is cut off;
 * the vfunc check below belongs to another function.
 */
/* For now, do not support virtual functions. */ if (epf->vfunc_no > 0) {
dev_err(&epf->dev, "Virtual functions are not supported\n"); return -EINVAL;
}
/* * Cap the maximum number of queues we can support on the controller * with the number of IRQs we can use.
 */
if (epc_features->msix_capable && epf->msix_interrupts) {
dev_info(&epf->dev, "PCI endpoint controller supports MSI-X, %u vectors\n",
epf->msix_interrupts);
max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
} elseif (epc_features->msi_capable && epf->msi_interrupts) {
dev_info(&epf->dev, "PCI endpoint controller supports MSI, %u vectors\n",
epf->msi_interrupts);
max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
}
if (max_nr_queues < 2) {
dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
max_nr_queues); return -EINVAL;
}
/* Create the target controller. */
ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues); if (ret) {
dev_err(&epf->dev, "Failed to create NVMe PCI target controller (err=%d)\n",
ret); return ret;
}
/* Set device ID, class, etc. */
epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
epf->header); if (ret) {
dev_err(&epf->dev, "Failed to write configuration header (err=%d)\n", ret); goto out_destroy_ctrl;
}
ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
&epf->bar[BAR_0]); if (ret) {
dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret); goto out_destroy_ctrl;
}
/* * Enable interrupts and start polling the controller BAR if we do not * have a link up notifier.
 */
ret = nvmet_pci_epf_init_irq(nvme_epf); if (ret) goto out_clear_bar;
if (!epc_features->linkup_notifier)
nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
/*
 * NOTE(review): extraction residue, not part of this source file. The
 * trailing German text was a website disclaimer ("the information on this
 * web page was compiled to the best of our knowledge; no guarantee of
 * completeness, correctness, or quality is given; the syntax highlighting
 * and measurement are still experimental") and should be removed.
 */