/* NOTE(review): fragment — this code starts mid-function (the enclosing
 * definition is not visible in this chunk; it appears to belong to the
 * icmnd submit path, presumably snic_issue_icmnd_req — TODO confirm
 * against the full source). Left byte-identical; comments only. */
/* DMA-map the sense buffer so the device can deposit sense data. */
pa = dma_map_single(&snic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE); if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost, "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
sc->sense_buffer, snic_cmd_tag(sc));
/* Mapping failed: report out-of-memory to the caller. */
ret = -ENOMEM;
return ret;
}
/* Encode the LUN and translate the SCSI data direction into the
 * request's read/write flag bits. */
int_to_scsilun(sc->device->lun, &lun); if (sc->sc_data_direction == DMA_FROM_DEVICE)
flags |= SNIC_ICMND_RD; if (sc->sc_data_direction == DMA_TO_DEVICE)
flags |= SNIC_ICMND_WR;
/* * snic_queuecommand * Routine to send a scsi cdb to LLD * Called with host_lock held and interrupts disabled
*/ int
snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{ struct snic_tgt *tgt = NULL; struct snic *snic = shost_priv(shost); int ret;
tgt = starget_to_tgt(scsi_target(sc->device));
/* If the target is not ready, fail the command back to the midlayer
 * immediately (result carries the chkready status). */
ret = snic_tgt_chkready(tgt); if (ret) {
SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
sc->result = ret;
scsi_done(sc);
return 0;
}
if (snic_get_state(snic) != SNIC_ONLINE) {
SNIC_HOST_ERR(shost, "snic state is %s\n",
snic_state_str[snic_get_state(snic)]);
/* NOTE(review): the switch below references `cmpl_status`, which is not
 * declared in this function — this chunk appears spliced with a
 * completion-handler fragment from elsewhere in the file. The function
 * body is incomplete here; code left byte-identical. TODO: restore from
 * the full source. */
switch (cmpl_status) { case SNIC_STAT_IO_SUCCESS:
CMD_FLAGS(sc) |= SNIC_IO_DONE; break;
case SNIC_STAT_ABORTED:
CMD_FLAGS(sc) |= SNIC_IO_ABORTED; break;
default:
SNIC_BUG_ON(1);
}
}
/*
 * snic_process_io_failed_state:
 * Processes IO's error states
 *
 * Maps a failed firmware completion status (cmpl_stat) onto the SCSI
 * midlayer DID_* host byte, updates the matching driver statistics
 * counter, and stores the combined result (host byte | firmware SCSI
 * status byte) in sc->result for the midlayer to act on.
 */
static void
snic_process_io_failed_state(struct snic *snic,
			     struct snic_icmnd_cmpl *icmnd_cmpl,
			     struct scsi_cmnd *sc,
			     u8 cmpl_stat)
{
	int res = 0;

	switch (cmpl_stat) {
	case SNIC_STAT_TIMEOUT:		/* Req was timedout */
		atomic64_inc(&snic->s_stats.misc.io_tmo);
		res = DID_TIME_OUT;
		break;

	case SNIC_STAT_ABORTED:		/* Req was aborted */
		atomic64_inc(&snic->s_stats.misc.io_aborted);
		res = DID_ABORT;
		break;

	case SNIC_STAT_DATA_CNT_MISMATCH: /* Recv/Sent more/less data than exp */
		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
		/* Propagate the residual byte count to the midlayer. */
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		res = DID_ERROR;
		break;

	case SNIC_STAT_OUT_OF_RES:	/* Out of resources to complete request */
		atomic64_inc(&snic->s_stats.fw.out_of_res);
		res = DID_REQUEUE;
		break;

	case SNIC_STAT_IO_NOT_FOUND:	/* Requested I/O was not found */
		atomic64_inc(&snic->s_stats.io.io_not_found);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SGL_INVALID:	/* Req was aborted due to sgl error */
		atomic64_inc(&snic->s_stats.misc.sgl_inval);
		res = DID_ERROR;
		break;

	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
		atomic64_inc(&snic->s_stats.fw.io_errs);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
		atomic64_inc(&snic->s_stats.fw.scsi_errs);
		/* Host byte stays DID_OK (0); the SCSI status byte from the
		 * firmware carries the error to the midlayer. */
		break;

	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
		res = DID_NO_CONNECT;
		break;

	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
	case SNIC_STAT_INVALID_PARM:	/* Some param in req is invalid */
	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
	default:
		SNIC_SCSI_DBG(snic->shost, "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline. or Unknown\n");
		res = DID_ERROR;
		break;
	}

	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));

	/* Set sc->result */
	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
} /* end of snic_process_io_failed_state */
/* NOTE(review): this span splices together the opening of
 * snic_tmreq_pending with fragments of what appear to be the icmnd and
 * itmf completion handlers (the `} end of snic_itmf_cmpl_handler` marker
 * below). Bodies are incomplete and reference locals (cmnd_id, hdr_stat,
 * io_lock, rqi, fwreq, ctx) whose declarations are not visible. Code left
 * byte-identical; comments only. TODO: restore from the full source. */
/* * snic_tmreq_pending : is task management in progress.
*/ staticint
snic_tmreq_pending(struct scsi_cmnd *sc)
{ int state = CMD_STATE(sc);
/* Reject completions whose tag falls outside the valid tag map. */
if (cmnd_id >= snic->max_tag_id) {
SNIC_HOST_ERR(snic->shost, "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
cmnd_id, snic_io_status_to_str(hdr_stat)); return;
}
/* * if SCSI-ML has already issued abort on this command, * ignore completion of the IO. The abts path will clean it up
*/ if (unlikely(snic_tmreq_pending(sc))) {
snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
spin_unlock_irqrestore(io_lock, flags);
snic_stats_update_io_cmpl(&snic->s_stats);
/* Expected value is SNIC_STAT_ABORTED */ if (likely(hdr_stat == SNIC_STAT_ABORTED)) return;
SNIC_SCSI_DBG(snic->shost, "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
(int)(cmnd_id & SNIC_TAG_MASK),
snic_io_status_to_str(cmpl_stat),
CMD_FLAGS(sc));
/* * If scsi_eh thread is blocked waiting for abts complete, * signal completion to it. IO will be cleaned in the thread, * else clean it in this context.
*/ if (rqi->abts_done) {
complete(rqi->abts_done);
spin_unlock_irqrestore(io_lock, flags);
/* spl case, dev reset issued through ioctl */ if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
rqi = (struct snic_req_info *) ctx;
sc = rqi->sc;
goto ioctl_dev_rst;
}
if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
SNIC_HOST_ERR(snic->shost, "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
cmnd_id, snic_io_status_to_str(hdr_stat));
SNIC_BUG_ON(1);
/* A NULL sc here means the firmware completed a request the driver
 * no longer tracks; count it and bail. */
ioctl_dev_rst: if (!sc) {
atomic64_inc(&snic->s_stats.io.sc_null);
SNIC_HOST_ERR(snic->shost, "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
snic_io_status_to_str(hdr_stat), cmnd_id);
return;
}
snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
} /* end of snic_itmf_cmpl_handler */
/* NOTE(review): spliced span — pieces of a reset-completion handler, a
 * firmware-subsystem error check, the fwCQ dispatch switch, the opening of
 * snic_fwcq_cmpl_handler, and the tail of snic_queue_itmf_req. Several
 * locals (cmnd_id, hdr_stat, io_lock, fwreq, tmreq, tmf, req_id) have no
 * visible declarations. Code left byte-identical; comments only.
 * TODO: restore from the full source. */
if (cmnd_id >= snic->max_tag_id) {
SNIC_HOST_ERR(snic->shost, "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
cmnd_id, snic_io_status_to_str(hdr_stat));
SNIC_BUG_ON(1);
return 1;
}
sc = scsi_host_find_tag(snic->shost, cmnd_id);
ioctl_hba_rst: if (!sc) {
atomic64_inc(&snic->s_stats.io.sc_null);
SNIC_HOST_ERR(snic->shost, "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
snic_io_status_to_str(hdr_stat), cmnd_id);
ret = 1;
if (!snic->remove_wait) {
spin_unlock_irqrestore(io_lock, flags);
SNIC_HOST_ERR(snic->shost, "reset_cmpl:host reset completed after timeout\n");
ret = 1;
/* Check for snic subsys errors */ switch (fwreq->hdr.status) { case SNIC_STAT_NOT_READY: /* XPT yet to initialize */
SNIC_HOST_ERR(snic->shost, "sNIC SubSystem is NOT Ready.\n"); break;
case SNIC_STAT_FATAL_ERROR: /* XPT Error */
SNIC_HOST_ERR(snic->shost, "sNIC SubSystem in Unrecoverable State.\n"); break;
}
/* Dispatch the firmware completion to the handler that matches its
 * response/message type. */
switch (fwreq->hdr.type) { case SNIC_RSP_EXCH_VER_CMPL:
snic_io_exch_ver_cmpl_handler(snic, fwreq); break;
case SNIC_RSP_REPORT_TGTS_CMPL:
snic_report_tgt_cmpl_handler(snic, fwreq); break;
case SNIC_RSP_ICMND_CMPL:
snic_icmnd_cmpl_handler(snic, fwreq); break;
case SNIC_RSP_ITMF_CMPL:
snic_itmf_cmpl_handler(snic, fwreq); break;
case SNIC_RSP_HBA_RESET_CMPL:
snic_hba_reset_cmpl_handler(snic, fwreq); break;
case SNIC_MSG_ACK:
snic_msg_ack_handler(snic, fwreq); break;
case SNIC_MSG_ASYNC_EVNOTIFY:
snic_aen_handler(snic, fwreq); break;
/* * snic_fwcq_cmpl_handler * Routine to process fwCQ * This CQ is independent, and not associated with wq/rq/wq_copy queues
*/ int
snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
{ unsignedint num_ent = 0; /* number cq entries processed */ unsignedint cq_idx; unsignedint nent_per_cq; struct snic_misc_stats *misc_stats = &snic->s_stats.misc;
/* * In case of multiple aborts on same cmd, * use try_wait_for_completion and completion_done() to check * whether it queues aborts even after completion of abort issued * prior.SNIC_BUG_ON(completion_done(&rqi->done));
*/
/* Queue the task-management request descriptor to the firmware WQ and
 * log the outcome either way. */
ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq)); if (ret)
SNIC_HOST_ERR(snic->shost, "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret); else
SNIC_SCSI_DBG(snic->shost, "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.",
tmf, sc, rqi, req_id, snic_cmd_tag(sc));
return ret;
} /* end of snic_queue_itmf_req */
/* NOTE(review): spliced span — the opening of snic_issue_tm_req followed
 * by fragments of the abort-finish path and the tail of
 * snic_send_abort_and_wait (its signature is not visible). Locals such as
 * io_lock, flags, tm_done, sv_state and tag have no visible declarations.
 * Code left byte-identical; comments only. TODO: restore from the full
 * source. */
staticint
snic_issue_tm_req(struct snic *snic, struct snic_req_info *rqi, struct scsi_cmnd *sc, int tmf)
{ struct snic_host_req *tmreq = NULL; int req_id = 0, tag = snic_cmd_tag(sc); int ret = 0;
/* No TM requests while a firmware reset is in flight. */
if (snic_get_state(snic) == SNIC_FWRESET) return -EBUSY;
SNIC_SCSI_DBG(snic->shost, "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
tag, sc, CMD_FLAGS(sc));
ret = FAILED;
goto abort_fail;
}
rqi->abts_done = NULL;
ret = FAILED;
/* Check the abort status. */ switch (CMD_ABTS_STATUS(sc)) { case SNIC_INVALID_CODE: /* Firmware didn't complete abort req, timedout */
CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
atomic64_inc(&snic->s_stats.abts.drv_tmo);
SNIC_SCSI_DBG(snic->shost, "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
sc, snic_cmd_tag(sc), CMD_FLAGS(sc)); /* do not release snic request in timedout case */
rqi = NULL;
goto abort_fail;
case SNIC_STAT_IO_SUCCESS: case SNIC_STAT_IO_NOT_FOUND:
ret = SUCCESS; /* * If abort path doesn't call scsi_done(), * the # IO timeouts == 2, will cause the LUN offline. * Call scsi_done to complete the IO.
*/
sc->result = (DID_ERROR << 16);
scsi_done(sc); break;
default: /* Firmware completed abort with error */
ret = FAILED;
rqi = NULL; break;
}
/* Detach the request from the command before releasing it. */
CMD_SP(sc) = NULL;
SNIC_HOST_INFO(snic->shost, "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
CMD_FLAGS(sc));
abort_fail:
spin_unlock_irqrestore(io_lock, flags); if (rqi)
snic_release_req_buf(snic, rqi, sc);
/* * Avoid a race between SCSI issuing the abort and the device * completing the command. * * If the command is already completed by fw_cmpl code, * we just return SUCCESS from here. This means that the abort * succeeded. In the SCSI ML, since the timeout for command has * happened, the completion won't actually complete the command * and it will be considered as an aborted command * * The CMD_SP will not be cleared except while holding io_lock
*/
spin_lock_irqsave(io_lock, flags);
rqi = (struct snic_req_info *) CMD_SP(sc); if (!rqi) {
spin_unlock_irqrestore(io_lock, flags);
SNIC_HOST_ERR(snic->shost, "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
tag, CMD_FLAGS(sc));
ret = SUCCESS;
goto send_abts_end;
}
rqi->abts_done = &tm_done; if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
ret = 0; goto abts_pending;
}
SNIC_BUG_ON(!rqi->abts_done);
/* Save Command State, should be restored on failed to Queue. */
sv_state = CMD_STATE(sc);
/* * Command is still pending, need to abort it * If the fw completes the command after this point, * the completion won't be done till mid-layer, since abort * has already started.
*/
CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);
spin_unlock_irqrestore(io_lock, flags);
/* Now Queue the abort command to firmware */
ret = snic_queue_abort_req(snic, rqi, sc, tmf); if (ret) {
atomic64_inc(&snic->s_stats.abts.q_fail);
SNIC_HOST_ERR(snic->shost, "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
tag, ret, CMD_FLAGS(sc));
spin_lock_irqsave(io_lock, flags); /* Restore Command's previous state */
CMD_STATE(sc) = sv_state;
rqi = (struct snic_req_info *) CMD_SP(sc); if (rqi)
rqi->abts_done = NULL;
spin_unlock_irqrestore(io_lock, flags);
ret = FAILED;
abts_pending: /* * Queued an abort IO, wait for its completion. * Once the fw completes the abort command, it will * wakeup this thread.
*/
wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
send_abts_end: return ret;
} /* end of snic_send_abort_and_wait */
/* NOTE(review): spliced span — the opening of snic_abort_cmd followed by
 * fragments of the LUN-reset cleanup paths (presumably
 * snic_dr_clean_pending_req / snic_dr_clean_single_req and the
 * device-reset queueing path — TODO confirm). Locals (io_lock, flags,
 * lr_sdev, rqi, sv_state) have no visible declarations. Code left
 * byte-identical; comments only. */
/* * This function is exported to SCSI for sending abort cmnds. * A SCSI IO is represent by snic_ioreq in the driver. * The snic_ioreq is linked to the SCSI Cmd, thus a link with the ULP'S IO
*/ int
snic_abort_cmd(struct scsi_cmnd *sc)
{ struct snic *snic = shost_priv(sc->device->host); int ret = SUCCESS, tag = snic_cmd_tag(sc);
u32 start_time = jiffies;
/* walk through the tag map, and check if IOs are still pending in fw */ for (tag = 0; tag < snic->max_tag_id; tag++) {
io_lock = snic_io_lock_tag(snic, tag);
/* * Found IO that is still pending w/ firmware and belongs to * the LUN that is under reset, if lr_sc != NULL
*/
SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
snic_ioreq_state_to_str(CMD_STATE(sc)));
if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
/* Ignore Cmd that don't belong to Lun Reset device */ if (!sc || sc->device != lr_sdev) goto skip_clean;
rqi = (struct snic_req_info *) CMD_SP(sc);
if (!rqi) goto skip_clean;
if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) goto skip_clean;
if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
(!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
SNIC_SCSI_DBG(snic->shost, "clean_single_req: devrst is not pending sc 0x%p\n",
sc);
goto skip_clean;
}
SNIC_SCSI_DBG(snic->shost, "clean_single_req: Found IO in %s on lun\n",
snic_ioreq_state_to_str(CMD_STATE(sc)));
/* Save Command State */
sv_state = CMD_STATE(sc);
/* * Any pending IO issued prior to reset is expected to be * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING * to indicate the IO is abort pending. * When IO is completed, the IO will be handed over and handled * in this function.
*/
/* * Cleanup any IOs on this LUN that have still not completed. * If any of these fail, then LUN Reset fails. * Cleanup cleans all commands on this LUN except * the lun reset command. If all cmds get cleaned, the LUN Reset * succeeds.
*/
ret = snic_dr_clean_pending_req(snic, sc); if (ret) {
spin_lock_irqsave(io_lock, flags);
SNIC_SCSI_DBG(snic->shost, "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
snic_cmd_tag(sc));
rqi = (struct snic_req_info *) CMD_SP(sc);
spin_unlock_irqrestore(io_lock, flags); /* * The Command state is changed to IOREQ_PENDING, * in this case, if the command is completed, the icmnd_cmpl will * mark the cmd as completed. * This logic still makes LUN Reset is inevitable.
*/
ret = snic_queue_dr_req(snic, rqi, sc); if (ret) {
SNIC_HOST_ERR(snic->shost, "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
tag, ret, CMD_FLAGS(sc));
spin_lock_irqsave(io_lock, flags); /* Restore State */
CMD_STATE(sc) = sv_state;
rqi = (struct snic_req_info *) CMD_SP(sc); if (rqi)
rqi->dr_done = NULL; /* rqi is freed in caller. */
spin_unlock_irqrestore(io_lock, flags);
ret = FAILED;
/* NOTE(review): spliced span — the opening of snic_dev_reset_supported
 * (body cut off) followed by the first part of snic_device_reset (body
 * cut off mid-function). Code left byte-identical; comments only.
 * TODO: restore from the full source. */
/* * auxiliary function to check lun reset op is supported or not * Not supported if returns 0
*/ staticint
snic_dev_reset_supported(struct scsi_device *sdev)
{ struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
/* * SCSI Eh thread issues a LUN Reset when one or more commands on a LUN * fail to get aborted. It calls driver's eh_device_reset with a SCSI * command on the LUN.
*/ int
snic_device_reset(struct scsi_cmnd *sc)
{ struct Scsi_Host *shost = sc->device->host; struct snic *snic = shost_priv(shost); struct snic_req_info *rqi = NULL; int tag = snic_cmd_tag(sc); int start_time = jiffies; int ret = FAILED; int dr_supp = 0;
SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
snic_cmd_tag(sc));
/* Fail fast if the target does not support LUN reset. */
dr_supp = snic_dev_reset_supported(sc->device); if (!dr_supp) { /* device reset op is not supported */
SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);
goto dev_rst_end;
}
/* Reset only makes sense while the snic is online. */
if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
snic_unlink_and_release_req(snic, sc, 0);
SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");
goto dev_rst_end;
}
/* There is no tag when lun reset is issue through ioctl. */ if (unlikely(tag <= SNIC_NO_TAG)) {
SNIC_HOST_INFO(snic->shost, "Devrst: LUN Reset Recvd thru IOCTL.\n");
rqi = snic_req_init(snic, 0); if (!rqi) goto dev_rst_end;
/* NOTE(review): spliced span — snic_reset with internal chunks missing
 * (the FWRESET `if` at the top is never closed before the inflight-IO
 * wait), followed by the opening of snic_host_reset, a fragment that
 * completes dr/abts waiters (references an undeclared `rqi`), and the
 * opening of snic_scsi_cleanup. Code left byte-identical; comments only.
 * TODO: restore from the full source. */
int
snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{ struct snic *snic = shost_priv(shost); enum snic_state sv_state; unsignedlong flags; int ret = FAILED;
/* Set snic state as SNIC_FWRESET*/
sv_state = snic_get_state(snic);
spin_lock_irqsave(&snic->snic_lock, flags); if (snic_get_state(snic) == SNIC_FWRESET) {
spin_unlock_irqrestore(&snic->snic_lock, flags);
SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n");
/* Wait for all the IOs that are entered in Qcmd */ while (atomic_read(&snic->ios_inflight))
schedule_timeout(msecs_to_jiffies(1));
/* On HBA-reset failure, restore the saved snic state and report
 * FAILED to the SCSI error handler. */
ret = snic_issue_hba_reset(snic, sc); if (ret) {
SNIC_HOST_ERR(shost, "reset:Host Reset Failed w/ err %d.\n",
ret);
spin_lock_irqsave(&snic->snic_lock, flags);
snic_set_state(snic, sv_state);
spin_unlock_irqrestore(&snic->snic_lock, flags);
atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
ret = FAILED;
goto reset_end;
}
ret = SUCCESS;
reset_end: return ret;
} /* end of snic_reset */
/* * SCSI Error handling calls driver's eh_host_reset if all prior * error handling levels return FAILED. * * Host Reset is the highest level of error recovery. If this fails, then * host is offlined by SCSI.
*/ int
snic_host_reset(struct scsi_cmnd *sc)
{ struct Scsi_Host *shost = sc->device->host;
u32 start_time = jiffies; int ret;
/* * CASE : FW didn't post itmf completion due to PCIe Errors. * Marking the abort status as Success to call scsi completion * in snic_abort_finish()
*/
CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS;
rqi = (struct snic_req_info *) CMD_SP(sc); if (!rqi) return;
/* Wake whichever waiter (device-reset or abort) is blocked on this
 * request. */
if (rqi->dr_done)
complete(rqi->dr_done); elseif (rqi->abts_done)
complete(rqi->abts_done);
}
/* * snic_scsi_cleanup: Walks through tag map and releases the reqs
*/ staticvoid
snic_scsi_cleanup(struct snic *snic, int ex_tag)
{ struct snic_req_info *rqi = NULL; struct scsi_cmnd *sc = NULL;
spinlock_t *io_lock = NULL; unsignedlong flags; int tag;
u64 st_time = 0;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.