/*
 * NOTE(review): this span is a fragment — the enclosing function signature
 * lies outside the visible chunk, and the text appears to interleave the
 * tails of several qedf ELS routines (ELS request submission, the ELS
 * completion handler, and an RRQ completion path).  All identifiers used
 * below (task, qedf, xid, els_req, sqe, timer_msec, fcport, cqe, mp_info,
 * orig_io_req, refcount, rrq_req, cb_arg) are declared outside this view.
 * Annotations are hedged accordingly.
 */
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
qedf_init_mp_task(els_req, task, sqe);
/* Put timer on els request */ if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
/* Ring doorbell */
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS " "req\n");
qedf_ring_doorbell(fcport);
/* Mark the request in-flight so flush/cleanup paths can find it. */
set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
/* When flush is active, * let the cmds be completed from the cleanup context
*/ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Dropping ELS completion xid=0x%x as fcport is flushing",
els_req->xid); return;
}
clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
/* Kill the ELS timer */
cancel_delayed_work(&els_req->timeout_work);
/* Get ELS response length from CQE */
mp_info = &cqe->cqe_info.midpath_info;
els_req->mp_req.resp_len = mp_info->data_placement_size;
/* Parse ELS response */ if ((els_req->cb_func) && (els_req->cb_arg)) {
els_req->cb_func(els_req->cb_arg);
els_req->cb_arg = NULL;
}
/* * This should return the aborted io_req to the command pool. Note that * we need to check the refcount in case the original request was * flushed but we get a completion on this xid.
*/ if (orig_io_req && refcount > 0)
kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free: /* * Release a reference to the rrq request if we timed out as the * rrq completion handler is called directly from the timeout handler * and not from els_compl where the reference would have normally been * released.
*/ if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
kref_put(&rrq_req->refcount, qedf_release_cmd);
kfree(cb_arg);
}
/* Assumes kref is already held by caller */ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
/*
 * NOTE(review): the locals used below (fcport, refcount, qedf, lport, fp)
 * have no visible declarations — presumably lost in extraction.  The tail
 * of this function (building and sending the actual RRQ ELS) also appears
 * to be missing; the trailing fc_exch_recv() call looks spliced in from a
 * different, frame-receive routine.  Confirm against the full source.
 */
if (!aborted_io_req) {
QEDF_ERR(NULL, "abort_io_req is NULL.\n"); return -EINVAL;
}
fcport = aborted_io_req->fcport;
/*
 * A NULL fcport means the RRQ work was queued before the rport was
 * flushed: drop the caller's reference and bail out.
 */
if (!fcport) {
refcount = kref_read(&aborted_io_req->refcount);
QEDF_ERR(NULL, "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
aborted_io_req->xid, refcount);
kref_put(&aborted_io_req->refcount, qedf_release_cmd); return -EINVAL;
}
/* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); return -EINVAL;
}
qedf = fcport->qedf;
/* * Sanity check that we can send a RRQ to make sure that refcount isn't * 0
 */
refcount = kref_read(&aborted_io_req->refcount); if (refcount != 1) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS, "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
aborted_io_req->xid, aborted_io_req, refcount); return -EINVAL;
}
/* Send completed request to libfc */
/* NOTE(review): lport and fp are undefined here — see note above. */
fc_exch_recv(lport, fp);
}
/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	/*
	 * NOTE(review): lport, rdata and port_id are declared but never used
	 * in the visible text — the logout/relogin tail of this function was
	 * presumably lost in extraction; confirm against the full source.
	 */
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;	/* FIX: was "unsignedlong" (missing space) */

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return;
	}

	/* Serialize against a concurrent reset/upload of the same rport. */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
		    fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	/*
	 * NOTE(review): everything below references identifiers (els_req,
	 * cb_arg, resp_len, fp, orig_io_req) and a label (free_arg) that are
	 * not declared or defined anywhere in this function — it appears to
	 * be a fragment of an ELS completion callback spliced in during
	 * extraction.  Left byte-identical; restore from the original source.
	 */
	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
		QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
		    els_req->xid);
		goto free_arg;
	}

	/*
	 * If a middle path ELS command times out, don't try to return the
	 * command but rather do any internal cleanup and then let libfc
	 * timeout the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded.  Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is " "beyond page size.\n");
		goto free_arg;
	}

	fc_frame_free(fp);
out_put: /* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
/*
 * NOTE(review): fragment — no function header is visible.  The lines below
 * appear to interleave (1) the validation prologue of an SRR-send routine,
 * (2) its srr_err exit tail, and (3) the body of an SRR completion
 * handler.  Identifiers (orig_io_req, fcport, qedf, rc, cb_arg, io_req,
 * cqe) are declared outside this chunk.
 */
if (!orig_io_req) {
QEDF_ERR(NULL, "orig_io_req is NULL.\n"); return -EINVAL;
}
fcport = orig_io_req->fcport;
/* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); return -EINVAL;
}
/* Take reference until SRR command completion */
kref_get(&orig_io_req->refcount);
srr_err: if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req" "=0x%x\n", orig_io_req->xid);
kfree(cb_arg); /* If we fail to queue SRR, send ABTS to orig_io */
qedf_initiate_abts(orig_io_req, true);
kref_put(&orig_io_req->refcount, qedf_release_cmd);
} else /* Tell other threads that SRR is in progress */
set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
/*
 * NOTE(review): from here on the code operates on io_req/cqe — this looks
 * like the start of the SRR *completion* callback body, spliced in without
 * its signature; confirm against the full source.
 */
/* If we timed out just free resources */ if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
QEDF_ERR(&qedf->dbg_ctx, "cqe is NULL or timeout event (0x%x)", io_req->event); goto free;
}
/* Kill the timer we put on the request */
cancel_delayed_work_sync(&io_req->timeout_work);
rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl); if (rc)
QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will " "abort, xid=0x%x.\n", io_req->xid);
free:
kfree(cb_arg);
kref_put(&io_req->refcount, qedf_release_cmd);
}
/*
 * NOTE(review): fragment — no function header visible.  This looks like
 * the body of a routine that reissues a SCSI command on a new exchange;
 * orig_io_req, new_io_req, flags and the "out:" label are declared/defined
 * outside this chunk.
 */
fcport = orig_io_req->fcport; if (!fcport) {
QEDF_ERR(NULL, "fcport is NULL.\n"); goto out;
}
if (!orig_io_req->sc_cmd) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for " "xid=0x%x.\n", orig_io_req->xid); goto out;
}
new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); if (!new_io_req) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new " "io_req.\n"); goto out;
}
/* Hand the SCSI command over to the newly allocated request. */
new_io_req->sc_cmd = orig_io_req->sc_cmd;
/* * This keeps the sc_cmd struct from being returned to the tape * driver and being requeued twice. We do need to put a reference * for the original I/O request since we will not do a SCSI completion * for it.
 */
orig_io_req->sc_cmd = NULL;
kref_put(&orig_io_req->refcount, qedf_release_cmd);
spin_lock_irqsave(&fcport->rport_lock, flags);
/* kref for new command released in qedf_post_io_req on error */ if (qedf_post_io_req(fcport, new_io_req)) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n"); /* Return SQE to pool */
atomic_inc(&fcport->free_sqes);
/*
 * NOTE(review): this error path exits the visible fragment still holding
 * rport_lock — the matching spin_unlock_irqrestore() presumably follows
 * after the visible text; confirm against the full source.
 */
} else {
QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS, "Reissued SCSI command from orig_xid=0x%x on " "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid); /* * Abort the original I/O but do not return SCSI command as * it has been reissued on another OX_ID.
 */
spin_unlock_irqrestore(&fcport->rport_lock, flags);
qedf_initiate_abts(orig_io_req, false); goto out;
}
/* Copy frame header from firmware into fp */
fh = (struct fc_frame_header *)fc_frame_header_get(fp);
memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
/* Copy payload from firmware into fp */
fc_payload = fc_frame_payload_get(fp, resp_len);
memcpy(fc_payload, resp_buf, resp_len);
opcode = fc_frame_payload_op(fp); if (opcode == ELS_LS_RJT) {
rjt = fc_frame_payload_get(fp, sizeof(*rjt)); if (!rjt) {
QEDF_ERR(&qedf->dbg_ctx, "payload get failed"); goto out_free_frame;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Received LS_RJT for REC: er_reason=0x%x, " "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan); /* * The following response(s) mean that we need to reissue the * request on another exchange. We need to do this without * informing the upper layers lest it cause an application * error.
*/ if ((rjt->er_reason == ELS_RJT_LOGIC ||
rjt->er_reason == ELS_RJT_UNAB) &&
rjt->er_explan == ELS_EXPL_OXID_RXID) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Handle CMD LOST case.\n");
qedf_requeue_io_req(orig_io_req);
}
} elseif (opcode == ELS_LS_ACC) {
offset = ntohl(acc->reca_fc4value);
e_stat = ntohl(acc->reca_e_stat);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
offset, e_stat); if (e_stat & ESB_ST_SEQ_INIT) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Target has the seq init\n"); goto out_free_frame;
}
sc_cmd = orig_io_req->sc_cmd; if (!sc_cmd) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "sc_cmd is NULL for xid=0x%x.\n",
orig_io_req->xid); goto out_free_frame;
} /* SCSI write case */ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { if (offset == orig_io_req->data_xfer_len) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "WRITE - response lost.\n");
r_ctl = FC_RCTL_DD_CMD_STATUS;
srr_needed = true;
offset = 0;
} else {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "WRITE - XFER_RDY/DATA lost.\n");
r_ctl = FC_RCTL_DD_DATA_DESC; /* Use data from warning CQE instead of REC */
offset = orig_io_req->tx_buf_off;
} /* SCSI read case */
} else { if (orig_io_req->rx_buf_off ==
orig_io_req->data_xfer_len) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "READ - response lost.\n");
srr_needed = true;
r_ctl = FC_RCTL_DD_CMD_STATUS;
offset = 0;
} else {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "READ - DATA lost.\n"); /* * For read case we always set the offset to 0 * for sequence recovery task.
*/
offset = 0;
r_ctl = FC_RCTL_DD_SOL_DATA;
}
}
if (srr_needed)
qedf_send_srr(orig_io_req, offset, r_ctl); else
qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
}
out_free_frame:
fc_frame_free(fp);
out_put: /* Put reference for original command since REC completed */
kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
kfree(cb_arg);
}
/* Assumes kref is already held by caller */ int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
/*
 * NOTE(review): truncated — the visible body ends right after taking the
 * reference; the code that actually builds and sends the REC ELS (and the
 * function's closing brace) is missing from this chunk, replaced by
 * unrelated boilerplate text.  The local "fcport" used below also has no
 * visible declaration.
 */
if (!orig_io_req) {
QEDF_ERR(NULL, "orig_io_req is NULL.\n"); return -EINVAL;
}
fcport = orig_io_req->fcport;
/*
 * NOTE(review): fcport is dereferenced without a NULL check here (unlike
 * qedf_send_rrq above) — confirm callers guarantee a valid fcport.
 */
/* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); return -EINVAL;
}
if (!fcport->qedf) {
QEDF_ERR(NULL, "fcport->qedf is NULL.\n"); return -EINVAL;
}
/* Take reference until REC command completion */
kref_get(&orig_io_req->refcount);
/*
 * NOTE(review): the following text was an extraneous German website
 * disclaimer appended during extraction and is not part of the driver
 * source; it should be removed.  (Translation: "The information on this
 * web page was carefully compiled to the best of our knowledge.  However,
 * neither completeness, correctness, nor quality of the provided
 * information is guaranteed.  Note: the colored syntax display and the
 * measurement are still experimental.")
 */