/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
/* number of bytes reserved for the queue id in a connection id */
#define BYTES_FOR_QID		sizeof(u16)
/* bit width of the queue-id field (2 bytes -> 16 bits) */
#define BYTES_FOR_QID_SHIFT	(BYTES_FOR_QID * 8)
/* mask selecting the queue-id portion of a connection id */
#define NVMET_FC_QUEUEID_MASK	((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
/* * The fcloop device passes in a NULL device pointer. Real LLD's will * pass in a valid device pointer. If NULL is passed to the dma mapping * routines, depending on the platform, it may or may not succeed, and * may crash. * * As such: * Wrapper all the dma routines and check the dev pointer. * * If simple mappings (return just a dma address, we'll noop them, * returning a dma address of 0. * * On more complex mappings (dma_map_sg), a pseudo routine fills * in the scatter list, setting all dma addresses to 0.
*/
/* fc-nvme target doesn't care about success or failure of cmd */
}
/* * This routine sends a FC-NVME LS to disconnect (aka terminate) * the FC-NVME Association. Terminating the association also * terminates the FC-NVME connections (per queue, both admin and io * queues) that are part of the association. E.g. things are torn * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * * The behavior of the fc-nvme target is such that its * understanding of the association and connections will implicitly * be torn down. The action is implicit as it may be due to a loss of * connectivity with the fc-nvme host, so the target may never get a * response even if it tried. As such, the action of this routine * is to asynchronously send the LS, ignore any results of the LS, and * continue on with terminating the association. If the fc-nvme host * is present and receives the LS, it too can tear down.
*/ staticvoid
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{ struct nvmet_fc_tgtport *tgtport = assoc->tgtport; struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; struct fcnvme_ls_disconnect_assoc_acc *discon_acc; struct nvmet_fc_ls_req_op *lsop; struct nvmefc_ls_req *lsreq; int ret;
/* * If ls_req is NULL or no hosthandle, it's an older lldd and no * message is normal. Otherwise, send unless the hostport has * already been invalidated by the lldd.
*/ if (!tgtport->ops->ls_req || assoc->hostport->invalid) return;
for (i = 0; i < queue->sqsize; fod++, i++) { if (fod->rspdma)
fc_dma_unmap_single(tgtport->dev, fod->rspdma, sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
}
fod = list_first_entry_or_null(&queue->fod_list, struct nvmet_fc_fcp_iod, fcp_list); if (fod) {
list_del(&fod->fcp_list);
fod->active = true; /* * no queue reference is taken, as it was taken by the * queue lookup just prior to the allocation. The iod * will "inherit" that reference.
*/
} return fod;
}
/* * put all admin cmds on hw queue id 0. All io commands go to * the respective hw queue based on a modulo basis
*/
fcpreq->hwqid = queue->qid ?
((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
/* ensure all in-flight I/Os have been processed */ for (i = NVMET_NR_QUEUES; i >= 0; i--) { if (assoc->queues[i])
flush_workqueue(assoc->queues[i]->work_q);
}
dev_info(tgtport->dev, "{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
/*
 * Called when a targetport deregisters. Severs the link between the
 * targetport and its nvmet port_entry, dropping the reference the
 * entry held on the targetport. The port_entry itself is left on the
 * global list so a later re-registration of the same WWNs can rebind
 * to it and resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	tgtport->pe = NULL;
	if (pe) {
		/* release the ref the port entry held on the targetport */
		nvmet_fc_tgtport_put(pe->tgtport);
		pe->tgtport = NULL;
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
/* * called when a new targetport is registered. Looks in the * existing nvmet port_entries to see if the nvmet layer is * configured for the targetport's wwn's. (the targetport existed, * nvmet configured, the lldd unregistered the tgtport, and is now * reregistering the same targetport). If so, set the nvmet port * port entry on the targetport.
*/ staticvoid
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{ struct nvmet_fc_port_entry *pe; unsignedlong flags;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { if (tgtport->fc_target_port.node_name == pe->node_name &&
tgtport->fc_target_port.port_name == pe->port_name) { if (!nvmet_fc_tgtport_get(tgtport)) continue;
/** * nvmet_fc_register_targetport - transport entry point called by an * LLDD to register the existence of a local * NVME subsystem FC port. * @pinfo: pointer to information about the port to be registered * @template: LLDD entrypoints and operational parameters for the port * @dev: physical hardware device node port corresponds to. Will be * used for DMA mappings * @portptr: pointer to a local port pointer. Upon success, the routine * will allocate a nvme_fc_local_port structure and place its * address in the local port pointer. Upon failure, local port * pointer will be set to NULL. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure.
*/ int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, struct nvmet_fc_target_template *template, struct device *dev, struct nvmet_fc_target_port **portptr)
{ struct nvmet_fc_tgtport *newrec; unsignedlong flags; int ret, idx;
/** * nvmet_fc_invalidate_host - transport entry point called by an LLDD * to remove references to a hosthandle for LS's. * * The nvmet-fc layer ensures that any references to the hosthandle * on the targetport are forgotten (set to NULL). The LLDD will * typically call this when a login with a remote host port has been * lost, thus LS's for the remote host port are no longer possible. * * If an LS request is outstanding to the targetport/hosthandle (or * issued concurrently with the call to invalidate the host), the * LLDD is responsible for terminating/aborting the LS and completing * the LS request. It is recommended that these terminations/aborts * occur after calling to invalidate the host handle to avoid additional * retries by the nvmet-fc transport. The nvmet-fc transport may * continue to reference host handle while it cleans up outstanding * NVME associations. The nvmet-fc transport will call the * ops->host_release() callback to notify the LLDD that all references * are complete and the related host handle can be recovered. * Note: if there are no references, the callback may be called before * the invalidate host call returns. * * @target_port: pointer to the (registered) target port that a prior * LS was received on and which supplied the transport the * hosthandle. * @hosthandle: the handle (pointer) that represents the host port * that no longer has connectivity and that LS's should * no longer be directed to.
*/ void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, void *hosthandle)
{ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvmet_fc_tgt_assoc *assoc, *next; unsignedlong flags; bool noassoc = true;
/* if there's nothing to wait for - call the callback */ if (noassoc && tgtport->ops->host_release)
tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
/* * nvmet layer has called to terminate an association
*/ staticvoid
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{ struct nvmet_fc_tgtport *tgtport, *next; struct nvmet_fc_tgt_assoc *assoc; struct nvmet_fc_tgt_queue *queue; unsignedlong flags; bool found_ctrl = false;
/* this is a bit ugly, but don't want to make locks layered */
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
tgt_list) { if (!nvmet_fc_tgtport_get(tgtport)) continue;
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
iod = tgtport->iod; for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++)
cancel_work(&iod->work);
/* * After this point the connection is lost and thus any pending * request can't be processed by the normal completion path. This * is likely a request from nvmet_fc_send_ls_req_async.
*/ while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, struct nvmet_fc_ls_req_op, lsreq_list))) {
list_del(&lsop->lsreq_list);
/** * nvmet_fc_unregister_targetport - transport entry point called by an * LLDD to deregister/remove a previously * registered a local NVME subsystem FC port. * @target_port: pointer to the (registered) target port that is to be * deregistered. * * Returns: * a completion status. Must be 0 upon success; a negative errno * (ex: -ENXIO) upon failure.
*/ int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); unsignedlong flags;
/* * FC-NVME spec changes. There are initiators sending different * lengths as padding sizes for Create Association Cmd descriptor * was incorrect. * Accept anything of "minimum" length. Assume format per 1.15 * spec (with HOSTID reduced to 16 bytes), ignore how long the * trailing pad length is.
*/ if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
ret = VERR_CR_ASSOC_LEN; elseif (be32_to_cpu(rqst->desc_list_len) <
FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
ret = VERR_CR_ASSOC_RQST_LEN; elseif (rqst->assoc_cmd.desc_tag !=
cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
ret = VERR_CR_ASSOC_CMD; elseif (be32_to_cpu(rqst->assoc_cmd.desc_len) <
FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
ret = VERR_CR_ASSOC_CMD_LEN; elseif (!rqst->assoc_cmd.ersp_ratio ||
(be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
be16_to_cpu(rqst->assoc_cmd.sqsize)))
ret = VERR_ERSP_RATIO;
else { /* new association w/ admin queue */
iod->assoc = nvmet_fc_alloc_target_assoc(
tgtport, iod->hosthandle); if (!iod->assoc)
ret = VERR_ASSOC_ALLOC_FAIL; else {
queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
be16_to_cpu(rqst->assoc_cmd.sqsize)); if (!queue) {
ret = VERR_QUEUE_ALLOC_FAIL;
nvmet_fc_tgt_a_put(iod->assoc);
}
}
}
if (ret) {
dev_err(tgtport->dev, "Create Association LS failed: %s\n",
validation_errors[ret]);
iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd,
FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0); return;
}
queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
atomic_set(&queue->connected, 1);
queue->sqhd = 0; /* best place to init value */
dev_info(tgtport->dev, "{%d:%d} Association created\n",
tgtport->fc_target_port.port_num, iod->assoc->a_id);
/* * Returns true if the LS response is to be transmit * Returns false if the LS response is to be delayed
*/ staticint
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod)
{ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
&iod->rqstbuf->rq_dis_assoc; struct fcnvme_ls_disconnect_assoc_acc *acc =
&iod->rspbuf->rsp_dis_assoc; struct nvmet_fc_tgt_assoc *assoc = NULL; struct nvmet_fc_ls_iod *oldls = NULL; unsignedlong flags; int ret = 0;
memset(acc, 0, sizeof(*acc));
ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); if (!ret) { /* match an active association - takes an assoc ref if !NULL */
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc; if (!assoc)
ret = VERR_NO_ASSOC;
}
/* * The rules for LS response says the response cannot * go back until ABTS's have been sent for all outstanding * I/O and a Disconnect Association LS has been sent. * So... save off the Disconnect LS to send the response * later. If there was a prior LS already saved, replace * it with the newer one and send a can't perform reject * on the older one.
*/
spin_lock_irqsave(&tgtport->lock, flags);
oldls = assoc->rcv_disconn;
assoc->rcv_disconn = iod;
spin_unlock_irqrestore(&tgtport->lock, flags);
if (oldls) {
dev_info(tgtport->dev, "{%d:%d} Multiple Disconnect Association LS's " "received\n",
tgtport->fc_target_port.port_num, assoc->a_id); /* overwrite good response with bogus failure */
oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, sizeof(*iod->rspbuf), /* ok to use rqst, LS is same */
rqst->w0.ls_cmd,
FCNVME_RJT_RC_UNAB,
FCNVME_RJT_EXP_NONE, 0);
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}
ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); if (ret)
nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}
/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD.
 * Initializes the response context, dispatches the request to the
 * command-specific handler, and transmits the formatted response unless
 * the handler asked for the response to be deferred.
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
	bool send_response = true;

	iod->lsrsp->nvme_fc_private = iod;
	iod->lsrsp->rspbuf = iod->rspbuf;
	iod->lsrsp->rspdma = iod->rspdma;
	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
	/* be preventative: handlers will later set the valid length */
	iod->lsrsp->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * Handlers parse the request input, execute the request, and
	 * format the LS response.
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* creates association and initial admin queue/connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* creates an io queue/connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT_ASSOC:
		/* terminate a queue/connection or the association */
		send_response = nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		/* unrecognized command: reject with "invalid" */
		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
				sizeof(*iod->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	if (send_response)
		nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
/*
 * Work-queue entry point for received FC-NVME LS requests: recovers
 * the iod from its embedded work item and forwards it to
 * nvmet_fc_handle_ls_rqst() for actual processing.
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);

	nvmet_fc_handle_ls_rqst(iod->tgtport, iod);
}
/** * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD * upon the reception of a NVME LS request. * * The nvmet-fc layer will copy payload to an internal structure for * processing. As such, upon completion of the routine, the LLDD may * immediately free/reuse the LS request buffer passed in the call. * * If this routine returns error, the LLDD should abort the exchange. * * @target_port: pointer to the (registered) target port the LS was * received on. * @hosthandle: pointer to the host specific data, gets stored in iod. * @lsrsp: pointer to a lsrsp structure to be used to reference * the exchange corresponding to the LS. * @lsreqbuf: pointer to the buffer containing the LS Request * @lsreqbuf_len: length, in bytes, of the received LS request
*/ int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, void *hosthandle, struct nvmefc_ls_rsp *lsrsp, void *lsreqbuf, u32 lsreqbuf_len)
{ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvmet_fc_ls_iod *iod; struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
dev_info(tgtport->dev, "RCV %s LS failed: payload too large (%d)\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "",
lsreqbuf_len); return -E2BIG;
}
if (!nvmet_fc_tgtport_get(tgtport)) {
dev_info(tgtport->dev, "RCV %s LS failed: target deleting\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : ""); return -ESHUTDOWN;
}
/* * check to see if we can send a 0's rsp. * Note: to send a 0's response, the NVME-FC host transport will * recreate the CQE. The host transport knows: sq id, SQHD (last * seen in an ersp), and command_id. Thus it will create a * zero-filled CQE with those known fields filled in. Transport * must send an ersp for any condition where the cqe won't match * this. * * Here are the FC-NVME mandated cases where we must send an ersp: * every N responses, where N=ersp_ratio * force fabric commands to send ersp's (not in FC-NVME but good * practice) * normal cmds: any time status is non-zero, or status is zero * but words 0 or 1 are non-zero. * the SQ is 90% or more full * the cmd is a fused command * transferred data length not equal to cmd iu length
*/
rspcnt = atomic_inc_return(&fod->queue->zrspcnt); if (!(rspcnt % fod->queue->ersp_ratio) ||
nvme_is_fabrics((struct nvme_command *) sqe) ||
xfr_length != fod->req.transfer_len ||
(le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
(sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
send_ersp = true;
/* data no longer needed */
nvmet_fc_free_tgt_pgs(fod);
/* * if an ABTS was received or we issued the fcp_abort early * don't call abort routine again.
*/ /* no need to take lock - lock was taken earlier to get here */ if (!fod->aborted)
tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
nvmet_fc_free_fcp_iod(fod->queue, fod);
}
staticvoid
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod)
{ int ret;
/* * for next sequence: * break at a sg element boundary * attempt to keep sequence length capped at * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to * be longer if a single sg element is larger * than that amount. This is done to avoid creating * a new sg list to use for the tgtport api.
*/
fcpreq->sg = sg;
fcpreq->sg_cnt = 0; while (tlen < remaininglen &&
fcpreq->sg_cnt < tgtport->max_sg_cnt &&
tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
fcpreq->sg_cnt++;
tlen += sg_dma_len(sg);
sg = sg_next(sg);
} if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
fcpreq->sg_cnt++;
tlen += min_t(u32, sg_dma_len(sg), remaininglen);
sg = sg_next(sg);
} if (tlen < remaininglen)
fod->next_sg = sg; else
fod->next_sg = NULL;
/* * If the last READDATA request: check if LLDD supports * combined xfr with response.
*/ if ((op == NVMET_FCOP_READDATA) &&
((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
(tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
fcpreq->op = NVMET_FCOP_READDATA_RSP;
nvmet_fc_prep_fcp_rsp(tgtport, fod);
}
ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); if (ret) { /* * should be ok to set w/o lock as it's in the thread of * execution (not an async timer routine) and doesn't * contend with any clearing action
*/
fod->abort = true;
/* if in the middle of an io and we need to tear down */ if (abort) { if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); returntrue;
}
nvmet_fc_abort_op(tgtport, fod); returntrue;
}
returnfalse;
}
/* * actual done handler for FCP operations when completed by the lldd
*/ staticvoid
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{ struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; struct nvmet_fc_tgtport *tgtport = fod->tgtport; unsignedlong flags; bool abort;
/* transfer the next chunk */
nvmet_fc_transfer_fcp_data(tgtport, fod,
NVMET_FCOP_WRITEDATA); return;
}
/* data transfer complete, resume with nvmet layer */
fod->req.execute(&fod->req); break;
case NVMET_FCOP_READDATA: case NVMET_FCOP_READDATA_RSP: if (__nvmet_fc_fod_op_abort(fod, abort)) return; if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
nvmet_fc_abort_op(tgtport, fod); return;
}
/* success */
if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { /* data no longer needed */
nvmet_fc_free_tgt_pgs(fod);
nvmet_fc_free_fcp_iod(fod->queue, fod); return;
}
fod->offset += fcpreq->transferred_length; if (fod->offset != fod->req.transfer_len) { /* transfer the next chunk */
nvmet_fc_transfer_fcp_data(tgtport, fod,
NVMET_FCOP_READDATA); return;
}
/* data transfer complete, send response */
/* data no longer needed */
nvmet_fc_free_tgt_pgs(fod);
nvmet_fc_xmt_fcp_rsp(tgtport, fod);
break;
case NVMET_FCOP_RSP: if (__nvmet_fc_fod_op_abort(fod, abort)) return;
nvmet_fc_free_fcp_iod(fod->queue, fod); break;
/* * actual completion handler after execution by the nvmet layer
*/ staticvoid
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod, int status)
{
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.