/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. *
********************************************************************/ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/unaligned.h> #include <linux/crc-t10dif.h> #include <net/checksum.h>
/**
 * lpfc_nvme_create_queue - bind an NVME queue index to a driver hw queue
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize, void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	/*
	 * NOTE(review): qhandle is dereferenced below without any visible
	 * allocation or assignment, and the function's tail (including its
	 * return statement and closing brace) is not present in this text.
	 * The allocation and validation code appears to have been lost —
	 * confirm against the complete file.
	 */
	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM"; /* Admin queue */
		qhandle->index = qidx;
	}
/**
 * lpfc_nvme_delete_queue - free resources bound to an NVME queue index
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	/*
	 * NOTE(review): lport is read here without being initialized from
	 * @pnvme_lport, and this completion wakeup looks like it belongs to
	 * the localport-delete path, not queue deletion. The body appears
	 * spliced from another function — confirm against the complete file.
	 */
	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}
/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall. NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	u32 fc4_xpt_flags;

	/*
	 * NOTE(review): vport, ndlp, and fc4_xpt_flags are used below
	 * without being initialized in the visible text (upstream derives
	 * ndlp from rport->ndlp and validates it first). The validation
	 * code appears to be missing — confirm against the complete file.
	 */
	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete of remoteport x%px, ndlp x%px "
			 "DID x%x xflags x%x\n",
			 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);

	spin_lock_irq(&ndlp->lock);

	/* The register rebind might have occurred before the delete
	 * downcall. Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
		ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	/* On a devloss timeout event, one more put is executed provided the
	 * NVME and SCSI rport unregister requests are complete.
	 */
	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
	return;
}
/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns 1 if LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_vport *vport;
	struct lpfc_nvme_rport *lpfc_rport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nvme_lport *lport;
	uint32_t *payload = axchg->payload;
	int rc;

	vport = axchg->ndlp->vport;
	/* The LS can only be forwarded if the node still has a registered
	 * nvme rport, a live localport, and the HBA is not flushing IO.
	 */
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport ||
	    test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;
	/*
	 * NOTE(review): the function is truncated here — the actual
	 * nvme_fc_rcv_ls_req() dispatch, the #else/#endif for the
	 * CONFIG_NVME_FC guard, and the return are not present in the
	 * visible text. Confirm against the complete file.
	 */
/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME
 *        LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		       struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	int status;

	/*
	 * NOTE(review): everything below references identifiers that are
	 * not declared in this function (genwqe, tmo, xmit_len, first_len,
	 * bpl, bmp, num_entry, bde, i) and `return 1` appears inside a void
	 * function. This body looks spliced from lpfc_nvme_gen_req(); the
	 * real completion-handler body appears to be missing. Confirm
	 * against the complete file.
	 */
	/* Save for completion so we can release these resources */
	genwqe->ndlp = lpfc_nlp_get(ndlp);
	if (!genwqe->ndlp) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	genwqe->context_un.nvme_lsreq = pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}
/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   struct nvmefc_ls_req *pnvme_lsreq,
		   void (*gen_req_cmp)(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdwqe,
				       struct lpfc_iocbq *rspwqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	/* LS traffic is only valid when the node is fully logged in:
	 * a target must be MAPPED, an initiator must be UNMAPPED.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}
	if (test_bit(HBA_IOQ_FLUSH, &vport->phba->hba_flag))
		return -ENODEV;

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * there are two dma buf in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvem layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the resonse data, we only
	 * care that we got a response. All of the caring is going to happen
	 * in the nvme-fc layer.
	 */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): bpl is declared but never used in the visible text;
	 * the code that populates the BPL in bmp->virt with the request and
	 * response DMA addresses (normally between the mbuf allocation and
	 * the gen_req call) appears to be missing — confirm against the
	 * complete file.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			 "rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/* 2 BDE entries: request payload + response buffer */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}
/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	int ret;

	/*
	 * NOTE(review): lport, rport, and vport are used below without
	 * being initialized from the transport handles (upstream derives
	 * them from pnvme_lport/pnvme_rport->private and NULL-checks
	 * them). That validation code appears to be missing — confirm
	 * against the complete file.
	 */
	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);

	return ret;
}
/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 * 0 : if LS found and aborted
 * non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;
	bool foundit = false;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
				 "x%06x, Failing LS Req\n",
				 ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

	/*
	 * Lock the ELS ring txcmplq and look for the wqe that matches
	 * this ELS. If found, issue an abort on the wqe.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
			/* Mark the WQE so the completion path knows the
			 * driver (not the target) initiated the abort.
			 */
			wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	/* The abort must be issued while holding hbalock but not the
	 * ring lock, hence the staged unlock.
	 */
	spin_unlock(&pring->ring_lock);

	if (foundit)
		lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
			 pnvme_lsreq);
	return -EINVAL;
}
if (rc) { /* * unless the failure is due to having already sent * the response, an abort will be generated for the * exchange if the rsp can't be sent.
*/ if (rc != -EALREADY)
atomic_inc(&lport->xmt_ls_abort); return rc;
}
return 0;
}
/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort a NVME LS request that is
 * in progress (from the transports perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int ret;

	/*
	 * NOTE(review): lport, vport, and ndlp are used below without being
	 * initialized from the transport handles (upstream derives them
	 * from pnvme_lport->private and the rport DID lookup). That setup
	 * code appears to be missing — confirm against the complete file.
	 */
	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lport->xmt_ls_abort);
}
/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME. NVME sends 96 bytes. Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		/* CMD IU is embedded in the WQE, so no SGE address needed */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];  /* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;			/* Skip Word 0 in payload */

		*wptr++ = *dptr++;	/* Word 1 */
		*wptr++ = *dptr++;	/* Word 2 */
		*wptr++ = *dptr++;	/* Word 3 */
		*wptr++ = *dptr++;	/* Word 4 */
		dptr++;			/* Skip Word 5 in payload */
		*wptr++ = *dptr++;	/* Word 6 */
		*wptr++ = *dptr++;	/* Word 7 */
		dptr += 8;		/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;	/* Word 16 */
		*wptr++ = *dptr++;	/* Word 17 */
		*wptr++ = *dptr++;	/* Word 18 */
		*wptr++ = *dptr++;	/* Word 19 */
		*wptr++ = *dptr++;	/* Word 20 */
		*wptr++ = *dptr++;	/* Word 21 */
		*wptr++ = *dptr++;	/* Word 22 */
		*wptr = *dptr;		/* Word 23 */
	} else {
		/* Non-embedded: point the first SGE at the CMD IU DMA addr */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
		/*
		 * NOTE(review): the function is truncated here — the rest of
		 * the else branch, the FCP_RSP SGE setup, and the closing
		 * brace are not present in the visible text. Confirm against
		 * the complete file.
		 */
if (unlikely(status && vport->localport)) {
lport = (struct lpfc_nvme_lport *)vport->localport->private; if (lport) { if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
}
}
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
status, wcqe->parameter); /* * Catch race where our node has transitioned, but the * transport is still transitioning.
*/
ndlp = lpfc_ncmd->ndlp; if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6062 Ignoring NVME cmpl. No ndlp\n"); goto out_err;
}
code = bf_get(lpfc_wcqe_c_code, wcqe); if (code == CQE_CODE_NVME_ERSP) { /* For this type of CQE, we need to rebuild the rsp */
ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
/* * Get Command Id from cmd to plug into response. This * code is not needed in the next NVME Transport drop.
*/
cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
cid = cp->sqe.common.command_id;
/* * RSN is in CQE word 2 * SQHD is in CQE Word 3 bits 15:0 * Cmd Specific info is in CQE Word 1 * and in CQE Word 0 bits 15:0
*/
sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
/* For NVME, the only failure path that results in an * IO error is when the adapter rejects it. All other * conditions are a success case and resolved by the * transport. * IOSTAT_FCP_RSP_ERROR means: * 1. Length of data received doesn't match total * transfer length in WQE * 2. If the RSP payload does NOT match these cases: * a. RSP length 12/24 bytes and all zeros * b. NVME ERSP
*/ switch (lpfc_ncmd->status) { case IOSTAT_SUCCESS:
nCmd->transferred_length = wcqe->total_data_placed;
nCmd->rcv_rsplen = 0;
nCmd->status = 0; break; case IOSTAT_FCP_RSP_ERROR:
nCmd->transferred_length = wcqe->total_data_placed;
nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0;
/* Get the NVME cmd details for this unique error. */
cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
/* Check if this is really an ERSP */ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
lpfc_ncmd->status = IOSTAT_SUCCESS;
lpfc_ncmd->result = 0;
/* pick up SLI4 exhange busy condition */ if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
/* Update stats and complete the IO. There is * no need for dma unprep because the nvme_transport * owns the dma address.
*/ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
lpfc_ncmd->ts_data_io = ktime_get_ns();
phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
lpfc_io_ktime(phba, lpfc_ncmd);
} if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
cpu = raw_smp_processor_id();
this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); if (lpfc_ncmd->cpu != cpu)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_NVME_IOERR, "6701 CPU Check cmpl: " "cpu %d expect %d\n",
cpu, lpfc_ncmd->cpu);
} #endif
/* NVME targets need completion held off until the abort exchange * completes unless the NVME Rport is getting unregistered.
*/
/* Check if IO qualified for CMF */ if (phba->cmf_active_mode != LPFC_CFG_OFF &&
nCmd->io_dir == NVMEFC_FCP_READ &&
nCmd->payload_length) { /* Used when calculating average latency */
lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
}
if (call_done)
nCmd->done(nCmd);
/* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * Initializes the IO WQE (iread/iwrite/icmnd template) for the
 * request held in @lpfc_ncmd and updates the per-queue statistics.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct nvme_common_command *sqe;
	struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			/* First burst applies only when enabled and the
			 * remote node negotiated it; cap at the node's
			 * first-burst size.
			 */
			if ((phba->cfg_nvme_enable_fb) &&
			    test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			/* For a CMF Managed port, iod must be zero'ed */
			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	/* Words 13 14 15 are for PBDE support */

	/* add the VMID tags as per switch response */
	if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_priority_tagging) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
		} else {
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
		}
	}

	pwqeq->vport = vport;
	return 0;
}
/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 *
 * Builds the SLI4 SGEs for the scatter-gather list supplied by the
 * nvme-fc transport, after fixing up the command/response SGEs via
 * lpfc_nvme_adj_fcp_sgls().
 *
 * Return value :
 *   0 - Success
 *   1 - Error (bad segment count or inconsistent payload)
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs. The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command. Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking the segment boundaries */
		j = 2;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			/*
			 * NOTE(review): the per-SGE population code is
			 * missing from the visible text — sgl_xtra is
			 * dereferenced below while still NULL (upstream
			 * allocates it via lpfc_get_sgl_per_hdwq() when a
			 * chained SGL is needed, and fills each SGE from
			 * data_sg before this point). The stray close brace
			 * after lsp_just_set also suggests lost lines.
			 * Confirm against the complete file.
			 */
				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;

				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}

		/* PBDE support for first data SGE only */
		if (nseg == 1 && phba->cfg_enable_pbde) {
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 - set PBDE bit */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			/* Word 11 - PBDE bit disabled by default template */
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as it io request handler. This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   -Exxx - on failure (e.g. -EINVAL, -EBUSY, -ENOMEM)
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
	uint64_t start = 0;
#if (IS_ENABLED(CONFIG_NVME_FC))
	u8 *uuid = NULL;
	int err;
	enum dma_data_direction iodir;
#endif

	/*
	 * NOTE(review): in the visible text, phba, rport, lpfc_queue_info,
	 * and freqpriv are all read below without ever being initialized
	 * (upstream derives them from lport->vport->phba, the remoteport
	 * private data, hw_queue_handle, and pnvme_fcreq->private). Those
	 * assignments appear to be missing — confirm against the complete
	 * file.
	 */
	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Busy IO, ndlp not ready: rport x%px "
				 "ndlp x%px, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;
	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* Check if IO qualifies for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
	    pnvme_fcreq->payload_length) {
		ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
		if (ret) {
			ret = -EBUSY;
			goto out_fail;
		}
		/* Get start time for IO latency */
		start = ktime_get_ns();
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail1;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail1;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif
	lpfc_ncmd->rx_cmd_start = start;

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever. There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

#if (IS_ENABLED(CONFIG_NVME_FC))
	/* check the necessary and sufficient condition to support VMID */
	if (lpfc_is_vmid_enabled(phba) &&
	    (ndlp->vmid_support ||
	     phba->pport->vmid_priority_tagging ==
	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
		/* is the I/O generated by a VM, get the associated virtual */
		/* entity id */
		uuid = nvme_fc_io_getuuid(pnvme_fcreq);
		/*
		 * NOTE(review): this VMID `if` block and the CONFIG_NVME_FC
		 * `#if` are never closed in the visible text (the uuid
		 * handling and `#endif` are missing), so the braces below
		 * do not balance. Confirm against the complete file.
		 */

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identfier was create in our hardware queue create callback
	 * routine. The driver now is dependent on the IO queue steering from
	 * the transport. We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}
	/*
	 * NOTE(review): the function is truncated here — the WQE issue via
	 * lpfc_sli4_issue_wqe() and the out_free_nvme_buf / out_fail1 /
	 * out_fail error labels referenced by the gotos above are not
	 * present in the visible text. Confirm against the complete file.
	 */
/** * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS * @pnvme_lport: Pointer to the driver's local port data * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue * @pnvme_fcreq: IO request from nvme fc to driver. * * Driver registers this routine as its nvme request io abort handler. This * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq * data structure to the rport indicated in @lpfc_nvme_rport. This routine * is executed asynchronously - one the target is validated as "MAPPED" and * ready for IO, the driver issues the abort request and returns. * * Return value: * None
**/ staticvoid
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct nvme_fc_remote_port *pnvme_rport, void *hw_queue_handle, struct nvmefc_fcp_req *pnvme_fcreq)
{ struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; struct lpfc_hba *phba; struct lpfc_io_buf *lpfc_nbuf; struct lpfc_iocbq *nvmereq_wqe; struct lpfc_nvme_fcpreq_priv *freqpriv; unsignedlong flags; int ret_val;
/* Validate pointers. LLDD fault handling with transport does * have timing races.
*/
lport = (struct lpfc_nvme_lport *)pnvme_lport->private; if (unlikely(!lport)) return;
if (unlikely(!freqpriv)) return; if (test_bit(FC_UNLOADING, &vport->load_flag)) return;
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6002 Abort Request to rport DID x%06x " "for nvme_fc_req x%px\n",
pnvme_rport->port_id,
pnvme_fcreq);
lpfc_nbuf = freqpriv->nvme_buf; if (!lpfc_nbuf) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6140 NVME IO req has no matching lpfc nvme " "io buffer. Skipping abort req.\n"); return;
} elseif (!lpfc_nbuf->nvmeCmd) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6141 lpfc NVME IO req has no nvme_fcreq " "io buffer. Skipping abort req.\n"); return;
}
/* driver queued commands are in process of being flushed */ if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6139 Driver in reset cleanup - flushing " "NVME Req now. hba_flag x%lx\n",
phba->hba_flag); return;
}
/* Guard against IO completion being called at same time */
spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
spin_lock(&phba->hbalock);
nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/* * The lpfc_nbuf and the mapped nvme_fcreq in the driver's * state must match the nvme_fcreq passed by the nvme * transport. If they don't match, it is likely the driver * has already completed the NVME IO and the nvme transport * has not seen it yet.
*/ if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6143 NVME req mismatch: " "lpfc_nbuf x%px nvmeCmd x%px, " "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
pnvme_fcreq, nvmereq_wqe->sli4_xritag); goto out_unlock;
}
/* Don't abort IOs no longer on the pending queue. */ if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6142 NVME IO req x%px not queued - skipping " "abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag); goto out_unlock;
}
/* Sizes of additional private data for data structures. * No use for the last two sizes at this time.
*/
.local_priv_sz = sizeof(struct lpfc_nvme_lport),
.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
.lsrqst_priv_sz = 0,
.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
/* * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA * * This routine removes a nvme buffer from head of @hdwq io_buf_list * and returns to caller. * * Return codes: * NULL - Error * Pointer to lpfc_nvme_buf - Success
**/ staticstruct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx, int expedite)
{ struct lpfc_io_buf *lpfc_ncmd; struct lpfc_sli4_hdw_queue *qp; struct sli4_sge *sgl; struct lpfc_iocbq *pwqeq; union lpfc_wqe128 *wqe;
if (lpfc_ncmd) {
pwqeq = &(lpfc_ncmd->cur_iocbq);
wqe = &pwqeq->wqe;
/* Setup key fields in buffer that may have been changed * if other protocols used this buffer.
*/
pwqeq->cmd_flag = LPFC_IO_NVME;
pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
/* Rsp SGE will be filled in when we rcv an IO * from the NVME Layer to be sent. * The cmd is going to be embedded so we need a SKIP SGE.
*/
sgl = lpfc_ncmd->dma_sgl;
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2); /* Fill in word 3 / sgl_len during cmd submission */
/* Initialize 64 bytes only */
memset(wqe, 0, sizeof(union lpfc_wqe));
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
}
/** * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. * @phba: The Hba for which this call is being executed. * @lpfc_ncmd: The nvme buffer which is being released. * * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted.
**/ staticvoid
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{ struct lpfc_sli4_hdw_queue *qp; unsignedlong iflag = 0;
if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
/** * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. * @vport: the lpfc_vport instance requesting a localport. * * This routine is invoked to create an nvme localport instance to bind * to the nvme_fc_transport. It is called once during driver load * like lpfc_create_shost after all other services are initialized. * It requires a vport, vpi, and wwns at call time. Other localport * parameters are modified as the driver's FCID and the Fabric WWN * are established. * * Return codes * 0 - successful * -ENOMEM - no heap memory available * other values - from nvme registration upcall
**/ int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{ int ret = 0; struct lpfc_hba *phba = vport->phba; struct nvme_fc_port_info nfcp_info; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport;
/* Initialize this localport instance. The vport wwn usage ensures * that NPIV is accounted for.
*/
memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
/* We need to tell the transport layer + 1 because it takes page * alignment into account. When space for the SGL is allocated we * allocate + 3, one for cmd, one for rsp and one for this alignment
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
/* Advertise how many hw queues we support based on cfg_hdw_queue, * which will not exceed cpu count.
*/
lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
if (!IS_ENABLED(CONFIG_NVME_FC)) return ret;
/* localport is allocated from the stack, but the registration * call allocates heap memory as well as the private area.
*/
ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
&vport->phba->pcidev->dev, &localport); if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, "6005 Successfully registered local " "NVME port num %d, localP x%px, private " "x%px, sg_seg %d\n",
localport->port_num, localport,
localport->private,
lpfc_nvme_template.max_sgl_segments);
/* Private is our lport size declared in the template. */
lport = (struct lpfc_nvme_lport *)localport->private;
vport->localport = localport;
lport->vport = vport;
vport->nvmei_support = 1;
/*
 * NOTE(review): The following text is extraneous website boilerplate
 * (originally German) picked up during extraction; it is not part of the
 * driver source and, as bare prose, would break compilation. Preserved
 * here, translated, inside a comment pending removal:
 *
 * "The information on this web page was compiled carefully and to the
 * best of our knowledge. However, neither completeness, correctness,
 * nor quality of the information provided is guaranteed.
 * Note: The colored syntax highlighting and the measurement are still
 * experimental."
 */