/* * This file is part of the Chelsio FCoE driver for Linux. * * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* * csio_scsi_match_io - Match an ioreq with the given SCSI level data. * @ioreq: The I/O request * @sld: Level information * * Should be called with lock held. *
*/ staticbool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{ struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);
switch (sld->level) { case CSIO_LEV_LUN: if (scmnd == NULL) returnfalse;
case CSIO_LEV_RNODE: return ((ioreq->lnode == sld->lnode) &&
(ioreq->rnode == sld->rnode)); case CSIO_LEV_LNODE: return (ioreq->lnode == sld->lnode); case CSIO_LEV_ALL: returntrue; default: returnfalse;
}
}
/* * csio_scsi_gather_active_ios - Gather active I/Os based on level * @scm: SCSI module * @sld: Level information * @dest: The queue where these I/Os have to be gathered. * * Should be called with lock held.
*/ staticvoid
csio_scsi_gather_active_ios(struct csio_scsim *scm, struct csio_scsi_level_data *sld, struct list_head *dest)
{ struct list_head *tmp, *next;
/* Nothing active: nothing to gather. */
if (list_empty(&scm->active_q)) return;
/* Just splice the entire active_q into dest */ if (sld->level == CSIO_LEV_ALL) {
list_splice_tail_init(&scm->active_q, dest); return;
}
/*
 * NOTE(review): this function appears truncated in this extract - the
 * per-level walk of active_q (presumably matching each ioreq via
 * csio_scsi_match_io() and moving matches to @dest) and the closing
 * brace are missing; confirm against the full source.
 */
staticinlinebool
csio_scsi_itnexus_loss_error(uint16_t error)
{ switch (error) { case FW_ERR_LINK_DOWN: case FW_RDEV_NOT_READY: case FW_ERR_RDEV_LOST: case FW_ERR_RDEV_LOGO: case FW_ERR_RDEV_IMPL_LOGO: returntrue;
} returnfalse;
}
/* * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod. * @req: IO req structure. * @addr: DMA location to place the payload. * * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
*/ staticinlinevoid
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{ struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr; struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
/* Check for Task Management */ if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_tm_flags = 0;
fcp_cmnd->fc_cmdref = 0;
/*
 * NOTE(review): the extract is garbled from here on. The rest of the
 * FCP_CMND framing (CDB copy, flags, data length, and the task-
 * management else-branch) is missing, and the lines below belong to a
 * different function - a WR-posting path using a WR pair 'wrp' -
 * since 'wrp', 'size' and 'hw' are not declared in this scope.
 * Confirm against the full source.
 */
if (wrp.size1 >= size) { /* Initialize WR in one shot */
csio_scsi_init_cmd_wr(req, wrp.addr1, size);
} else {
uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
/* * Make a temporary copy of the WR and write back * the copy into the WR pair.
*/
csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
memcpy(wrp.addr1, tmpwr, wrp.size1);
memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
}
}
/*
 * NOTE(review): fragment begins mid-function. This is the per-SGE
 * alignment check of a DDP setup path; the loop header and locals
 * (i, buf_off, sg_addr, sg_len, ddp_pagesz, scsim, dma_buf, alloc_len,
 * xfer_len, hw, scmnd) are declared above this extract.
 */
/* Except 1st buffer,all buffer addr have to be Page aligned */ if (i != 0 && buf_off) {
csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
sg_addr, sg_len); goto unaligned;
}
/* Except last buffer,all buffer must end on page boundary */ if ((i != (req->nsge - 1)) &&
((buf_off + sg_len) & (ddp_pagesz - 1))) {
csio_dbg(hw, "SGL addr not ending on page boundary" "(%llx:%d)\n", sg_addr, sg_len); goto unaligned;
}
}
/* SGL's are virtually contiguous. HW will DDP to SGLs */
req->dcopy = 0;
csio_scsi_read(req);
return;
unaligned:
CSIO_INC_STATS(scsim, n_unaligned); /* * For unaligned SGLs, driver will allocate internal DDP buffer. * Once command is completed data from DDP buffer copied to SGLs
*/
req->dcopy = 1;
/* Use gen_list to store the DDP buffers */
INIT_LIST_HEAD(&req->gen_list);
xfer_len = scsi_bufflen(scmnd);
i = 0; /* Allocate ddp buffers for this request */ while (alloc_len < xfer_len) {
/* Stop if the DDP pool is exhausted or we exceed the SGE limit. */
dma_buf = csio_get_scsi_ddp(scsim); if (dma_buf == NULL || i > scsim->max_sge) {
req->drv_status = -EBUSY; break;
}
alloc_len += dma_buf->len; /* Added to IO req */
list_add_tail(&dma_buf->list, &req->gen_list);
i++;
}
if (!req->drv_status) { /* set number of ddp bufs used */
req->nsge = i;
csio_scsi_read(req); return;
}
/* release dma descs */ if (i > 0)
csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}
/* * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR. * @req: IO req structure. * @addr: DMA location to place the payload. * @size: Size of WR * @abort: abort OR close * * Wrapper for populating fw_scsi_cmd_wr.
*/ staticinlinevoid
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size, bool abort)
{ struct csio_hw *hw = req->lnode->hwp; struct csio_rnode *rn = req->rnode; struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
/*
 * NOTE(review): the extract is fused here. The WR-population body of
 * csio_scsi_init_abrt_cls_wr is missing ('wr' is never used below),
 * and the following lines belong to a state-machine handler (it
 * references 'scsim' and switches on SM events). Confirm against the
 * full source.
 */
if (likely(req->drv_status == 0)) { /* change state and enqueue on active_q */
csio_set_state(&req->sm, csio_scsis_io_active);
list_add_tail(&req->sm.sm_list, &scsim->active_q);
csio_wr_issue(hw, req->eq_idx, false);
CSIO_INC_STATS(scsim, n_active);
return;
} break;
case CSIO_SCSIE_START_TM:
csio_scsi_cmd(req); if (req->drv_status == 0) { /* * NOTE: We collect the affected I/Os prior to issuing * LUN reset, and not after it. This is to prevent * aborting I/Os that get issued after the LUN reset, * but prior to LUN reset completion (in the event that * the host stack has not blocked I/Os to a LUN that is * being reset.
*/
csio_set_state(&req->sm, csio_scsis_tm_active);
list_add_tail(&req->sm.sm_list, &scsim->active_q);
csio_wr_issue(hw, req->eq_idx, false);
CSIO_INC_STATS(scsim, n_tm_active);
} return;
case CSIO_SCSIE_ABORT: case CSIO_SCSIE_CLOSE: /* * NOTE: * We could get here due to : * - a window in the cleanup path of the SCSI module * (csio_scsi_abort_io()). Please see NOTE in this function. * - a window in the time we tried to issue an abort/close * of a request to FW, and the FW completed the request * itself. * Print a message for now, and return INVAL either way.
*/
req->drv_status = -EINVAL;
csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req); break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
/*
 * NOTE(review): fragment begins mid-function - the function header is
 * missing from this extract. Judging by the events handled (COMPLETED,
 * ABORT, CLOSE, DRVCLEANUP on an active I/O), this looks like the
 * "I/O active" state handler of the SCSI ioreq state machine; 'req',
 * 'evt', 'scm', 'hw' and 'rn' are declared above this extract.
 */
switch (evt) { case CSIO_SCSIE_COMPLETED:
CSIO_DEC_STATS(scm, n_active);
list_del_init(&req->sm.sm_list);
csio_set_state(&req->sm, csio_scsis_uninit); /* * In MSIX mode, with multiple queues, the SCSI compeltions * could reach us sooner than the FW events sent to indicate * I-T nexus loss (link down, remote device logo etc). We * dont want to be returning such I/Os to the upper layer * immediately, since we wouldnt have reported the I-T nexus * loss itself. This forces us to serialize such completions * with the reporting of the I-T nexus loss. Therefore, we * internally queue up such up such completions in the rnode. * The reporting of I-T nexus loss to the upper layer is then * followed by the returning of I/Os in this internal queue. * Having another state alongwith another queue helps us take * actions for events such as ABORT received while we are * in this rnode queue.
*/ if (unlikely(req->wr_status != FW_SUCCESS)) {
rn = req->rnode; /* * FW says remote device is lost, but rnode * doesn't reflect it.
*/ if (csio_scsi_itnexus_loss_error(req->wr_status) &&
csio_is_rnode_ready(rn)) {
csio_set_state(&req->sm,
csio_scsis_shost_cmpl_await);
list_add_tail(&req->sm.sm_list,
&rn->host_cmpl_q);
}
}
break;
case CSIO_SCSIE_ABORT:
/* Issue abort; on success post the WR and move to aborting state. */
csio_scsi_abrt_cls(req, SCSI_ABORT); if (req->drv_status == 0) {
csio_wr_issue(hw, req->eq_idx, false);
csio_set_state(&req->sm, csio_scsis_aborting);
} break;
case CSIO_SCSIE_CLOSE:
/* Issue close; on success post the WR and move to closing state. */
csio_scsi_abrt_cls(req, SCSI_CLOSE); if (req->drv_status == 0) {
csio_wr_issue(hw, req->eq_idx, false);
csio_set_state(&req->sm, csio_scsis_closing);
} break;
case CSIO_SCSIE_DRVCLEANUP:
req->wr_status = FW_HOSTERROR;
CSIO_DEC_STATS(scm, n_active);
csio_set_state(&req->sm, csio_scsis_uninit); break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
/*
 * NOTE(review): fragment begins mid-function - the function header is
 * missing from this extract. This looks like the "aborting" state
 * handler of the SCSI ioreq state machine (see the debug text below);
 * 'req', 'evt', 'scm' and 'hw' are declared above this extract.
 */
switch (evt) { case CSIO_SCSIE_COMPLETED:
csio_dbg(hw, "ioreq %p recvd cmpltd (wr_status:%d) " "in aborting st\n", req, req->wr_status); /* * Use -ECANCELED to explicitly tell the ABORTED event that * the original I/O was returned to driver by FW. * We dont really care if the I/O was returned with success by * FW (because the ABORT and completion of the I/O crossed each * other), or any other return value. Once we are in aborting * state, the success or failure of the I/O is unimportant to * us.
*/
req->drv_status = -ECANCELED; break;
case CSIO_SCSIE_ABORT:
CSIO_INC_STATS(scm, n_abrt_dups); break;
case CSIO_SCSIE_ABORTED:
csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
req, req->wr_status, req->drv_status); /* * Check if original I/O WR completed before the Abort * completion.
*/ if (req->drv_status != -ECANCELED) {
csio_warn(hw, "Abort completed before original I/O," " req:%p\n", req);
CSIO_DB_ASSERT(0);
}
/* * There are the following possible scenarios: * 1. The abort completed successfully, FW returned FW_SUCCESS. * 2. The completion of an I/O and the receipt of * abort for that I/O by the FW crossed each other. * The FW returned FW_EINVAL. The original I/O would have * returned with FW_SUCCESS or any other SCSI error. * 3. The FW couldn't sent the abort out on the wire, as there * was an I-T nexus loss (link down, remote device logged * out etc). FW sent back an appropriate IT nexus loss status * for the abort. * 4. FW sent an abort, but abort timed out (remote device * didnt respond). FW replied back with * FW_SCSI_ABORT_TIMEDOUT. * 5. FW couldn't genuinely abort the request for some reason, * and sent us an error. * * The first 3 scenarios are treated as succesful abort * operations by the host, while the last 2 are failed attempts * to abort. Manipulate the return value of the request * appropriately, so that host can convey these results * back to the upper layer.
*/ if ((req->wr_status == FW_SUCCESS) ||
(req->wr_status == FW_EINVAL) ||
csio_scsi_itnexus_loss_error(req->wr_status))
req->wr_status = FW_SCSI_ABORT_REQUESTED;
/*
 * NOTE(review): as shown, the ABORTED case falls through into
 * DRVCLEANUP, which overwrites wr_status with FW_HOSTERROR - that
 * contradicts the wr_status manipulation just above. A 'break;' (and
 * the dequeue/state bookkeeping) appears to have been lost in this
 * extract; confirm against the full source.
 */
case CSIO_SCSIE_DRVCLEANUP:
req->wr_status = FW_HOSTERROR;
CSIO_DEC_STATS(scm, n_active);
csio_set_state(&req->sm, csio_scsis_uninit); break;
case CSIO_SCSIE_CLOSE: /* * We can receive this event from the module * cleanup paths, if the FW forgot to reply to the ABORT WR * and left this ioreq in this state. For now, just ignore * the event. The CLOSE event is sent to this state, as * the LINK may have already gone down.
*/ break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
/*
 * NOTE(review): fragment begins mid-function - the function header is
 * missing from this extract. This looks like the "closing" state
 * handler of the SCSI ioreq state machine (see the debug text below);
 * 'req', 'evt', 'scm' and 'hw' are declared above this extract.
 */
switch (evt) { case CSIO_SCSIE_COMPLETED:
csio_dbg(hw, "ioreq %p recvd cmpltd (wr_status:%d) " "in closing st\n", req, req->wr_status); /* * Use -ECANCELED to explicitly tell the CLOSED event that * the original I/O was returned to driver by FW. * We dont really care if the I/O was returned with success by * FW (because the CLOSE and completion of the I/O crossed each * other), or any other return value. Once we are in aborting * state, the success or failure of the I/O is unimportant to * us.
*/
req->drv_status = -ECANCELED; break;
case CSIO_SCSIE_CLOSED: /* * Check if original I/O WR completed before the Close * completion.
*/ if (req->drv_status != -ECANCELED) {
csio_fatal(hw, "Close completed before original I/O," " req:%p\n", req);
CSIO_DB_ASSERT(0);
}
/* * Either close succeeded, or we issued close to FW at the * same time FW compelted it to us. Either way, the I/O * is closed.
*/
CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
(req->wr_status == FW_EINVAL));
req->wr_status = FW_SCSI_CLOSE_REQUESTED;
/*
 * NOTE(review): as shown, the CLOSED case falls through into
 * DRVCLEANUP, overwriting wr_status with FW_HOSTERROR right after it
 * was set to FW_SCSI_CLOSE_REQUESTED. A 'break;' (and the dequeue/
 * state bookkeeping) appears to have been lost in this extract;
 * confirm against the full source.
 */
case CSIO_SCSIE_DRVCLEANUP:
req->wr_status = FW_HOSTERROR;
CSIO_DEC_STATS(scm, n_active);
csio_set_state(&req->sm, csio_scsis_uninit); break;
default:
csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
CSIO_DB_ASSERT(0);
}
}
/*
 * csio_scsis_shost_cmpl_await - SM state for I/Os held back until the
 * I-T nexus loss has been reported to the upper layer.
 * @req: The I/O request.
 * @evt: Event delivered to the state machine.
 */
static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	switch (evt) {
	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * Succeed the abort right away, and rely on the remote
		 * device unregister path to clean this I/O up to the
		 * upper layer within a sane amount of time.
		 *
		 * A close can arrive during a LINK DOWN: the FW has
		 * already returned the I/O, but not the remote-device-
		 * lost event. If the upper layer times the I/O out in
		 * that window, a close comes in - treat it exactly like
		 * the abort (return success and wait for the unregister
		 * path). Should the FW message never arrive, the close
		 * times out and the upper layer escalates to the next
		 * level of error recovery.
		 */
		req->drv_status = 0;
		break;
	case CSIO_SCSIE_DRVCLEANUP:
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	default:
		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
			 evt, req);
		CSIO_DB_ASSERT(0);
	}
}
/* * csio_scsi_cmpl_handler - WR completion handler for SCSI. * @hw: HW module. * @wr: The completed WR from the ingress queue. * @len: Length of the WR. * @flb: Freelist buffer array. * @priv: Private object * @scsiwr: Pointer to SCSI WR. * * This is the WR completion handler called per completion from the * ISR. It is called with lock held. It walks past the RSS and CPL message * header where the actual WR is present. * It then gets the status, WR handle (ioreq pointer) and the len of * the WR, based on WR opcode. Only on a non-good status is the entire * WR copied into the WR cache (ioreq->fw_wr). * The ioreq corresponding to the WR is returned to the caller. * NOTE: The SCSI queue doesnt allocate a freelist today, hence * no freelist buffer is expected.
*/ struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len, struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
{ struct csio_ioreq *ioreq = NULL; struct cpl_fw6_msg *cpl;
uint8_t *tempwr;
uint8_t status; struct csio_scsim *scm = csio_hw_to_scsim(hw);
/*
 * NOTE(review): the body of this function is truncated in this
 * extract - the CPL header walk and opcode dispatch that assign
 * 'cpl', 'tempwr', 'status' and 'ioreq' are missing. As shown,
 * 'tempwr' would be read uninitialized in the csio_warn() below;
 * confirm against the full source. Only the invalid-opcode error
 * path is visible here.
 */
csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
CSIO_INC_STATS(scm, n_inval_scsiop); return NULL;
}
/* * csio_scsi_cleanup_io_q - Cleanup the given queue. * @scm: SCSI module. * @q: Queue to be cleaned up. * * Called with lock held. Has to exit with lock held.
*/ void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{ struct csio_hw *hw = scm->hw; struct csio_ioreq *ioreq; struct list_head *tmp, *next; struct scsi_cmnd *scmnd;
/* Call back the completion routines of the active_q */
list_for_each_safe(tmp, next, q) {
ioreq = (struct csio_ioreq *)tmp;
csio_scsi_drvcleanup(ioreq);
list_del_init(&ioreq->sm.sm_list);
scmnd = csio_scsi_cmnd(ioreq);
/* Drop the lock while calling into the upper layer. */
spin_unlock_irq(&hw->lock);
/* * Upper layers may have cleared this command, hence this * check to avoid accessing stale references.
*/ if (scmnd != NULL)
ioreq->io_cbfn(hw, ioreq);
/*
 * NOTE(review): this function is truncated in this extract - the
 * re-acquisition of hw->lock, the return of the ioreq to the free
 * list, and the closing braces are missing; confirm against the full
 * source (the contract above says it must exit with the lock held).
 */
/*
 * csio_scsi_abort_io_q - Abort all I/Os on given queue
 * @scm: SCSI module.
 * @q: Queue to abort.
 * @tmo: Timeout in ms
 *
 * Attempt to abort all I/Os on given queue, and wait for a max of
 * @tmo milliseconds for them to complete. Returns 0 if all I/Os are
 * aborted, -ETIMEDOUT otherwise.
 *
 * Should be entered with lock held. Exits with lock held.
 * NOTE: the lock has to be held across the loop that aborts I/Os,
 * since dropping it in between can corrupt the list. The caller must
 * therefore ensure the number of I/Os to abort is small enough to
 * avoid lock-held-for-too-long issues.
 */
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
	struct csio_hw *hw = scm->hw;
	struct list_head *cur, *nxt;
	int polls_left = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
	struct csio_ioreq *ioreq;

	if (list_empty(q))
		return 0;

	csio_dbg(hw, "Aborting SCSI I/Os\n");

	/* Issue an abort/close for every I/O on the queue (lock held). */
	list_for_each_safe(cur, nxt, q) {
		ioreq = (struct csio_ioreq *)cur;
		csio_abrt_cls(ioreq, csio_scsi_cmnd(ioreq));
	}

	/* Poll (with the lock dropped) until the queue drains or we time out. */
	while (!list_empty(q) && polls_left--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	return list_empty(q) ? 0 : -ETIMEDOUT;
}
/* * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module. * @scm: SCSI module. * @abort: abort required. * Called with lock held, should exit with lock held. * Can sleep when waiting for I/Os to complete.
*/ int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{ struct csio_hw *hw = scm->hw; int rv = 0; int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
/* No I/Os pending */ if (list_empty(&scm->active_q)) return 0;
/* Wait until all active I/Os are completed */ while (!list_empty(&scm->active_q) && count--) {
spin_unlock_irq(&hw->lock);
msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
spin_lock_irq(&hw->lock);
}
/* all I/Os completed */ if (list_empty(&scm->active_q)) return 0;
/*
 * NOTE(review): this function is truncated in this extract - the
 * abort/cleanup of the still-pending I/Os (presumably using @abort,
 * 'rv', csio_scsi_abort_io_q()/csio_scsi_cleanup_io_q()), the return
 * of 'rv', and the closing brace are missing; confirm against the
 * full source.
 */
/* * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode. * @scm: SCSI module. * @lnode: lnode * * Called with lock held, should exit with lock held. * Can sleep (with dropped lock) when waiting for I/Os to complete.
*/ int
csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
{ struct csio_hw *hw = scm->hw; struct csio_scsi_level_data sld; int rv; int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
/*
 * NOTE(review): this extract appears to have lost the lnode-level
 * gather step here ('sld' is declared but never initialized or used
 * below); confirm against the full source.
 */
/* No I/Os pending on this lnode */ if (list_empty(&ln->cmpl_q)) return 0;
/* Wait until all active I/Os on this lnode are completed */ while (!list_empty(&ln->cmpl_q) && count--) {
spin_unlock_irq(&hw->lock);
msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
spin_lock_irq(&hw->lock);
}
/* all I/Os completed */ if (list_empty(&ln->cmpl_q)) return 0;
csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
/* I/Os are pending, abort them */
rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000); if (rv != 0) {
csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
}
/*
 * NOTE(review): truncated - the function's return statement and
 * closing brace are missing from this extract.
 */
/*
 * NOTE(review): fragment begins mid-function - these are cases of a
 * switch (presumably on req->wr_status) in the SCSI completion error
 * handling path, mapping FW return codes to a SCSI 'host_status';
 * 'req', 'cmnd', 'scm' and 'hw' are declared above this extract.
 */
case FW_SCSI_ABORT_REQUESTED: case FW_SCSI_ABORTED: case FW_SCSI_CLOSE_REQUESTED:
csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
cmnd->cmnd[0],
(req->wr_status == FW_SCSI_CLOSE_REQUESTED) ? "closed" : "aborted"); /* * csio_eh_abort_handler checks this value to * succeed or fail the abort request.
*/
host_status = DID_REQUEUE; if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
CSIO_INC_STATS(scm, n_closed); else
CSIO_INC_STATS(scm, n_aborted); break;
case FW_SCSI_ABORT_TIMEDOUT: /* FW timed out the abort itself */
csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
req, cmnd, req->wr_status);
host_status = DID_ERROR;
CSIO_INC_STATS(scm, n_abrt_timedout); break;
case FW_RDEV_NOT_READY: /* * In firmware, a RDEV can get into this state * temporarily, before moving into dissapeared/lost * state. So, the driver should complete the request equivalent * to device-disappeared!
*/
CSIO_INC_STATS(scm, n_rdev_nr_error);
host_status = DID_ERROR; break;
case FW_ERR_RDEV_LOST:
CSIO_INC_STATS(scm, n_rdev_lost_error);
host_status = DID_ERROR; break;
case FW_ERR_RDEV_LOGO:
CSIO_INC_STATS(scm, n_rdev_logo_error);
host_status = DID_ERROR; break;
case FW_ERR_RDEV_IMPL_LOGO:
/*
 * NOTE(review): unlike its sibling cases, this one has no
 * CSIO_INC_STATS() call - possibly dropped in this extract;
 * confirm against the full source.
 */
host_status = DID_ERROR; break;
case FW_ERR_LINK_DOWN:
CSIO_INC_STATS(scm, n_link_down_error);
host_status = DID_ERROR; break;
case FW_FCOE_NO_XCHG:
CSIO_INC_STATS(scm, n_no_xchg_error);
host_status = DID_ERROR; break;
/*
 * NOTE(review): fragment begins mid-function - this looks like the
 * body of the queuecommand path (DMA-map the command, validate the
 * SGE count, allocate an ioreq and kick off the I/O state machine);
 * 'cmnd', 'hw', 'scsim', 'nsge', 'ioreq', 'retval', 'flags' and the
 * 'err*' labels are declared/defined outside this extract.
 */
/* Get req->nsge, if there are SG elements to be mapped */
nsge = scsi_dma_map(cmnd); if (unlikely(nsge < 0)) {
CSIO_INC_STATS(scsim, n_dmamap_error); goto err;
}
/* Do we support so many mappings? */ if (unlikely(nsge > scsim->max_sge)) {
csio_warn(hw, "More SGEs than can be supported." " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
CSIO_INC_STATS(scsim, n_unsupp_sge_error); goto err_dma_unmap;
}
/* Get a free ioreq structure - SM is already set to uninit */
ioreq = csio_get_scsi_ioreq_lock(hw, scsim); if (!ioreq) {
csio_err(hw, "Out of I/O request elements. Active #:%d\n",
scsim->stats.n_active);
CSIO_INC_STATS(scsim, n_no_req_error); goto err_dma_unmap;
}
/*
 * NOTE(review): the ioreq setup between allocation and start
 * (nsge/lnode/rnode/cbfn assignment etc.) appears to be missing from
 * this extract; confirm against the full source.
 */
/* Kick off SCSI IO SM on the ioreq */
spin_lock_irqsave(&hw->lock, flags);
retval = csio_scsi_start_io(ioreq);
spin_unlock_irqrestore(&hw->lock, flags);
if (retval != 0) {
csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
ioreq, retval);
CSIO_INC_STATS(scsim, n_busy_error); goto err_put_req;
}
/*
 * csio_do_abrt_cls - Issue an abort/close for an ioreq.
 * @hw: HW module.
 * @ioreq: The I/O to abort or close.
 * @abort: true for abort, false for close.
 */
staticint
csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
{ int rv; int cpu = smp_processor_id(); struct csio_lnode *ln = ioreq->lnode; struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS; /* * Use current processor queue for posting the abort/close, but retain * the ingress queue ID of the original I/O being aborted/closed - we * need the abort/close completion to be received on the same queue * as the original I/O.
*/
ioreq->eq_idx = sqset->eq_idx;
/*
 * NOTE(review): the extract is fused here - the remainder of
 * csio_do_abrt_cls (sending the abort/close event and returning 'rv')
 * is missing, and the lines below belong to a different function: an
 * eh_abort handler's result check ('cmnd' is not declared in this
 * scope). Confirm against the full source.
 */
/* FW successfully aborted the request */ if (host_byte(cmnd->result) == DID_REQUEUE) {
csio_info(hw, "Aborted SCSI command to (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
scsi_cmd_to_rq(cmnd)->tag); return SUCCESS;
} else {
csio_info(hw, "Failed to abort SCSI command, (%d:%llu) tag %u\n",
cmnd->device->id, cmnd->device->lun,
scsi_cmd_to_rq(cmnd)->tag); return FAILED;
}
}
/* * csio_tm_cbfn - TM callback function. * @hw: HW module. * @req: IO request. * * Cache the result in 'cmnd', since ioreq will be freed soon * after we return from here, and the waiting thread shouldnt trust * the ioreq contents.
*/ staticvoid
csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); struct csio_dma_buf *dma_buf;
uint8_t flags = 0; struct fcp_resp_with_ext *fcp_resp; struct fcp_resp_rsp_info *rsp_info;
csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
req, req->wr_status);
/* Cache FW return status */
csio_priv(cmnd)->wr_status = req->wr_status;
/* Special handling based on FCP response */
/* * FW returns us this error, if flags were set. FCP4 says * FCP_RSP_LEN_VAL in flags shall be set for TM completions. * So if a target were to set this bit, we expect that the * rsp_code is set to FCP_TMF_CMPL for a successful TM * completion. Any other rsp_code means TM operation failed. * If a target were to just ignore setting flags, we treat * the TM operation as success, and FW returns FW_SUCCESS.
*/ if (req->wr_status == FW_SCSI_RSP_ERR) {
dma_buf = &req->dma_buf;
fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
flags = fcp_resp->resp.fr_flags;
/* Modify return status if flags indicate success */ if (flags & FCP_RSP_LEN_VAL) if (rsp_info->rsp_code == FCP_TMF_CMPL)
csio_priv(cmnd)->wr_status = FW_SUCCESS;
/*
 * NOTE(review): this function is truncated in this extract - the
 * closing braces (and any completion/cleanup after the FCP response
 * handling) are missing, and the next lines belong to a different
 * function (a LUN reset handler). Confirm against the full source.
 */
/*
 * NOTE(review): fragment begins mid-function - this is the body of a
 * LUN reset (TMF) error-handler path: 'hw', 'ln', 'rn', 'cmnd',
 * 'scsim', 'ioreq', 'sld', 'count', 'retval', 'ret', 'flags',
 * 'local_q' and the 'fail*' labels are declared/defined outside this
 * extract.
 */
csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
cmnd->device->lun, rn->flowid, rn->scsi_id);
/* A TMF needs a ready local node to go out on. */
if (!csio_is_lnode_ready(ln)) {
csio_err(hw, "LUN reset cannot be issued on non-ready" " local node vnpi:0x%x (LUN:%llu)\n",
ln->vnp_flowid, cmnd->device->lun); goto fail;
}
/* Lnode is ready, now wait on rport node readiness */
ret = fc_block_scsi_eh(cmnd); if (ret) return ret;
/* * If we have blocked in the previous call, at this point, either the * remote node has come back online, or device loss timer has fired * and the remote node is destroyed. Allow the LUN reset only for * the former case, since LUN reset is a TMF I/O on the wire, and we * need a valid session to issue it.
*/ if (fc_remote_port_chkready(rn->rport)) {
csio_err(hw, "LUN reset cannot be issued on non-ready" " remote node ssni:0x%x (LUN:%llu)\n",
rn->flowid, cmnd->device->lun); goto fail;
}
/* Get a free ioreq structure - SM is already set to uninit */
ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
if (!ioreq) {
csio_err(hw, "Out of IO request elements. Active # :%d\n",
scsim->stats.n_active); goto fail;
}
/* * FW times the LUN reset for ioreq->tmo, so we got to wait a little * longer (10s for now) than that to allow FW to return the timed * out command.
*/
count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
/* Set cbfn */
ioreq->io_cbfn = csio_tm_cbfn;
/* Save of the ioreq info for later use */
sld.level = CSIO_LEV_LUN;
sld.lnode = ioreq->lnode;
sld.rnode = ioreq->rnode;
sld.oslun = cmnd->device->lun;
spin_lock_irqsave(&hw->lock, flags); /* Kick off TM SM on the ioreq */
retval = csio_scsi_start_tm(ioreq);
spin_unlock_irqrestore(&hw->lock, flags);
if (retval != 0) {
csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
ioreq, retval); goto fail_ret_ioreq;
}
csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
count * (CSIO_SCSI_TM_POLL_MS / 1000)); /* Wait for completion */ while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
&& count--)
msleep(CSIO_SCSI_TM_POLL_MS);
/* LUN reset succeeded, Start aborting affected I/Os */ /* * Since the host guarantees during LUN reset that there * will not be any more I/Os to that LUN, until the LUN reset * completes, we gather pending I/Os after the LUN reset.
*/
spin_lock_irq(&hw->lock);
csio_scsi_gather_active_ios(scsim, &sld, &local_q);
/*
 * NOTE(review): code appears to be missing between the gather above
 * and the check below - presumably the unlock and the abort of the
 * gathered I/Os (which would set 'retval'); confirm against the full
 * source.
 */
/* Aborts may have timed out */ if (retval != 0) {
csio_err(hw, "Attempt to abort I/Os during LUN reset of %llu" " returned %d\n", cmnd->device->lun, retval); /* Return I/Os back to active_q */
spin_lock_irq(&hw->lock);
list_splice_tail_init(&local_q, &scsim->active_q);
spin_unlock_irq(&hw->lock); goto fail;
}
/* * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs. * @scm: SCSI Module * @hw: HW device. * @buf_size: buffer size * @num_buf : Number of buffers. * * This routine allocates DMA buffers required for SCSI Data xfer, if * each SGL buffer for a SCSI Read request posted by SCSI midlayer are * not virtually contiguous.
*/ staticint
csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw, int buf_size, int num_buf)
{ int n = 0; struct list_head *tmp; struct csio_dma_buf *ddp_desc = NULL;
uint32_t unit_size = 0;
if (!num_buf) return 0;
if (!buf_size) return -EINVAL;
INIT_LIST_HEAD(&scm->ddp_freelist);
/* Align buf size to page size */
buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK; /* Initialize dma descriptors */ for (n = 0; n < num_buf; n++) { /* Set unit size to request size */
unit_size = buf_size;
ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL); if (!ddp_desc) {
csio_err(hw, "Failed to allocate ddp descriptors," " Num allocated = %d.\n",
scm->stats.n_free_ddp); goto no_mem;
}
/*
 * NOTE(review): this function is truncated in this extract - the DMA
 * buffer allocation for each descriptor, the freelist insertion, the
 * 'no_mem' cleanup path, the return statement and closing braces are
 * all missing; confirm against the full source. ('tmp' and
 * 'unit_size' are declared but unused in the visible portion.)
 */
/*
 * NOTE(review): the following trailing text is website boilerplate (a
 * German disclaimer) accidentally appended to this source extract; it
 * is not C code and should be removed from the file. Translation:
 * "The information on this web page was carefully compiled to the
 * best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the information provided is guaranteed.
 * Note: the colored syntax rendering and the measurement are still
 * experimental."
 */