// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */
/* Fragment of a target-mode IO completion handler.
 * NOTE(review): the enclosing function signature is outside this view;
 * 'io', 'efct', 'status', 'cb' and 'flags' are presumed parameters or
 * locals of it - confirm against the full source.
 */
if (!io->scsi_tgt_cb) {
	/* No target-server callback registered: just kick the pending
	 * IO list and return.
	 */
	efct_scsi_check_pending(efct);
	return;
}

/* Call target server completion */
cb = io->scsi_tgt_cb;

/* Clear the callback before invoking the callback */
io->scsi_tgt_cb = NULL;

/* if status was good, and auto-good-response was set,
 * then callback target-server with IO_CMPL_RSP_SENT,
 * otherwise send IO_CMPL
 */
if (status == 0 && io->auto_resp)
	flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
else
	flags |= EFCT_SCSI_IO_CMPL;
/* Map the SLI4 WCQE completion status (plus extended status) onto the
 * generic efct SCSI status reported to the target server.
 * Fix: the original used 'elseif', which is not a C keyword (compile
 * error); restored to 'else if'.
 * NOTE(review): this switch is truncated in the visible range - its
 * default case and closing brace lie outside this fragment.
 */
switch (status) {
case SLI4_FC_WCQE_STATUS_SUCCESS:
	scsi_stat = EFCT_SCSI_STATUS_GOOD;
	break;
case SLI4_FC_WCQE_STATUS_DI_ERROR:
	/* DIF (T10 PI) failure: ext_status carries guard/app/ref bits */
	if (ext_status & SLI4_FC_DI_ERROR_GE)
		scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
	else if (ext_status & SLI4_FC_DI_ERROR_AE)
		scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
	else if (ext_status & SLI4_FC_DI_ERROR_RE)
		scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
	else
		scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
	break;
case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
	switch (ext_status) {
	case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
	case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
		scsi_stat = EFCT_SCSI_STATUS_ABORTED;
		break;
	case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
		scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
		break;
	case SLI4_FC_LOCAL_REJECT_NO_XRI:
		scsi_stat = EFCT_SCSI_STATUS_NO_IO;
		break;
	default:
		/*we have seen 0x0d(TX_DMA_FAILED err)*/
		scsi_stat = EFCT_SCSI_STATUS_ERROR;
		break;
	}
	break;
case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
	/* target IO timed out */
	scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
	break;
case SLI4_FC_WCQE_STATUS_SHUTDOWN:
	/* Target IO cancelled by HW */
	scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
	break;
if (!hio_to_abort) {
	/*
	 * If "IO to abort" does not have an
	 * associated HW IO, immediately make callback with
	 * success. The command must have been sent to
	 * the backend, but the data phase has not yet
	 * started, so we don't have a HW IO.
	 *
	 * Note: since the backend shims should be
	 * taking a reference on io_to_abort, it should not
	 * be possible to have been completed and freed by
	 * the backend before the abort got here.
	 */
	scsi_io_printf(io, "IO: not active\n");
	((efct_hw_done_t)io->hw_cb)(io->hio, 0,
				    SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
	rc = 0;
	/* NOTE(review): this 'break' implies an enclosing loop or switch
	 * whose head is outside the visible range - confirm in full file.
	 */
	break;
}

/* HW IO is valid, abort it */
scsi_io_printf(io, "aborting\n");
rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
		      io->send_abts, io->hw_cb, io);
if (rc) {
	/* Abort was not queued to the HW; complete the abort locally by
	 * invoking the HW done callback ourselves. -ENOENT/-EINPROGRESS
	 * are treated as benign and still reported as success.
	 */
	int status = SLI4_FC_WCQE_STATUS_SUCCESS;
	efct_hw_done_t cb = io->hw_cb;

	if (rc != -ENOENT && rc != -EINPROGRESS) {
		status = -1;
		scsi_io_printf(io, "Failed to abort IO rc=%d\n",
			       rc);
	}
	cb(io->hio, 0, status, 0, io);
	rc = 0;
}
/* Fragment of the pending-IO dispatcher: takes the IO pulled off the
 * pending list (io_pending_lock is presumed held on entry - confirm in
 * the full file), pairs it with a HW IO if needed, and dispatches it.
 */
if (!io) {
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);
	return NULL;
}

if (io->io_type == EFCT_IO_TYPE_ABORT) {
	/* Aborts are dispatched without a HW IO */
	hio = NULL;
} else {
	hio = efct_hw_io_alloc(&efct->hw);
	if (!hio) {
		/*
		 * No HW IO available. Put IO back on
		 * the front of pending list.
		 * Fix: list_add() takes (new, head); the original passed
		 * them swapped, which splices the list head into this
		 * IO's link and orphans every other queued entry.
		 */
		list_add(&io->io_pending_link, &xport->io_pending_list);
		io = NULL;
	} else {
		hio->eq = io->hw_priv;
	}
}

/* Must drop the lock before dispatching the IO */
spin_unlock_irqrestore(&xport->io_pending_lock, flags);

if (!io)
	return NULL;

/*
 * We pulled an IO off the pending list,
 * and either got an HW IO or don't need one
 */
atomic_sub_return(1, &xport->io_pending_count);
if (!hio)
	status = efct_scsi_io_dispatch_no_hw_io(io);
else
	status = efct_scsi_io_dispatch_hw_io(io, hio);
if (status) {
	/*
	 * Invoke the HW callback, but do so in the
	 * separate execution context, provided by the
	 * NOP mailbox completion processing context
	 * by using efct_hw_async_call()
	 */
	if (efct_hw_async_call(&efct->hw,
			       efct_scsi_check_pending_async_cb, io)) {
		efc_log_debug(efct, "call hw async failed\n");
	}
}
/* Guard against recursion: only one instance of this dispatcher may run
 * at a time. atomic_add_return() yields the post-increment value, so the
 * sole/first entrant sees exactly 1; anything greater means another path
 * is already inside.
 * Fix: the original tested the bare return value, which is non-zero even
 * for the first entrant - the function would always bail out without
 * ever dispatching.
 */
if (atomic_add_return(1, &xport->io_pending_recursing) > 1) {
	/* This function is already running. Decrement and return. */
	atomic_sub_return(1, &xport->io_pending_recursing);
	return;
}

/* Drain the pending list, counting how many IOs were dispatched */
while (efct_scsi_dispatch_pending(efct))
	count++;

if (count) {
	atomic_sub_return(1, &xport->io_pending_recursing);
	return;
}

/*
 * If nothing was removed from the list,
 * we might be in a case where we need to abort an
 * active IO and the abort is on the pending list.
 * Look for an abort we can dispatch.
 * NOTE(review): the walk assumes io_pending_lock is taken by code
 * outside this visible range - verify against the full function.
 */
list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
	if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
		/* This IO has a HW IO, so it is
		 * active. Dispatch the abort.
		 */
		dispatch = 1;
		list_del_init(&io->io_pending_link);
		atomic_sub_return(1, &xport->io_pending_count);
		break;
	}
}
/*
 * If this IO already has a HW IO, this is not the first phase of the
 * IO. Send it straight to the HW.
 */
if (io->hio)
	return efct_scsi_io_dispatch_hw_io(io, io->hio);

/*
 * We don't already have a HW IO associated with the IO. First check
 * the pending list. If not empty, add IO to the tail and process the
 * pending list.
 */
spin_lock_irqsave(&xport->io_pending_lock, flags);
if (!list_empty(&xport->io_pending_list)) {
	/*
	 * If this is a low latency request,
	 * then put it at the front of the IO pending
	 * queue, otherwise put it at the end of the queue.
	 */
	if (io->low_latency) {
		INIT_LIST_HEAD(&io->io_pending_link);
		/*
		 * Fix: list_add() takes (new, head); the original passed
		 * them swapped, which re-links the list head into this
		 * IO's node and orphans the already-queued IOs (the list
		 * is known non-empty on this path).
		 */
		list_add(&io->io_pending_link, &xport->io_pending_list);
	} else {
		INIT_LIST_HEAD(&io->io_pending_link);
		list_add_tail(&io->io_pending_link,
			      &xport->io_pending_list);
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);
	atomic_add_return(1, &xport->io_pending_count);
	atomic_add_return(1, &xport->io_total_pending);

	/* process pending list */
	efct_scsi_check_pending(efct);
	return 0;
}
spin_unlock_irqrestore(&xport->io_pending_lock, flags);

/*
 * We don't have a HW IO associated with the IO and there's nothing
 * on the pending list. Attempt to allocate a HW IO and dispatch it.
 */
hio = efct_hw_io_alloc(&io->efct->hw);
if (!hio) {
	/* Couldn't get a HW IO. Save this IO on the pending list */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	INIT_LIST_HEAD(&io->io_pending_link);
	list_add_tail(&io->io_pending_link, &xport->io_pending_list);
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);
/*
 * For aborts, we don't need a HW IO, but we still want
 * to pass through the pending list to preserve ordering.
 * Thus, if the pending list is not empty, add this abort
 * to the pending list and process the pending list.
 */
spin_lock_irqsave(&xport->io_pending_lock, flags);
if (!list_empty(&xport->io_pending_list)) {
	INIT_LIST_HEAD(&io->io_pending_link);
	list_add_tail(&io->io_pending_link, &xport->io_pending_list);
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);
	/* Bookkeeping: one more IO queued behind the pending list */
	atomic_add_return(1, &xport->io_pending_count);
	atomic_add_return(1, &xport->io_total_pending);

	/* process pending list */
	efct_scsi_check_pending(efct);
	return 0;
}
spin_unlock_irqrestore(&xport->io_pending_lock, flags);
/* if this is the last data phase and there is no residual, enable
 * auto-good-response
 */
if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
    ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
    (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
	io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
	io->auto_resp = true;
} else {
	io->auto_resp = false;
}

/* save this transfer length */
io->xfer_req = io->wire_len;

/* Adjust the transferred count to account for overrun
 * when the residual is calculated in efct_scsi_send_resp
 */
io->transferred += residual;

/* Adjust the SGL size if there is overrun */
if (residual) {
	/* NOTE(review): fragment is truncated here - the body of this
	 * block (which presumably trims sgl_ptr->len by the overrun)
	 * lies outside the visible range.
	 */
	struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];
/* set residual status if necessary */
if (residual != 0) {
	/* FCP: if data transferred is less than the
	 * amount expected, then this is an underflow.
	 * If data transferred would have been greater
	 * than the amount expected this is an overflow
	 */
	if (residual > 0) {
		fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
		fcprsp->ext.fr_resid = cpu_to_be32(residual);
	} else {
		fcprsp->resp.fr_flags |= FCP_RESID_OVER;
		fcprsp->ext.fr_resid = cpu_to_be32(-residual);
	}
}

/* Copy sense data into the response, if supplied and within bounds */
if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
	if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
		efc_log_err(efct, "Sense exceeds max size.\n");
		return -EIO;
	}
/* take a reference on IO being aborted */
if (kref_get_unless_zero(&io->ref) == 0) {
	/* command no longer active */
	scsi_io_printf(io, "command no longer active\n");
	return -EIO;
}

/*
 * allocate a new IO to send the abort request. Use efct_io_alloc()
 * directly, as we need an IO object that will not fail allocation
 * due to allocations being disabled (in efct_scsi_io_alloc())
 */
abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
if (!abort_io) {
	atomic_add_return(1, &xport->io_alloc_failed_count);
	/* Drop the reference taken above before bailing out */
	kref_put(&io->ref, io->release);
	return -EIO;
}

/* Save the target server callback and argument */
/* set generic fields */
abort_io->cmd_tgt = true;
abort_io->node = io->node;

/* set type and abort-specific fields */
abort_io->io_type = EFCT_IO_TYPE_ABORT;
abort_io->display_name = "tgt_abort";
abort_io->io_to_abort = io;
abort_io->send_abts = false;
abort_io->abort_cb = cb;
abort_io->abort_cb_arg = arg;

/* now dispatch IO */
rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
if (rc)
	/* Dispatch failed: release the reference on the aborted IO */
	kref_put(&io->ref, io->release);
return rc;
}
/**
 * efct_scsi_io_complete() - Complete a SCSI IO on behalf of the backend.
 * @io: the IO being completed.
 *
 * Ignores (with a debug log) completions for IOs already marked free.
 * NOTE(review): the function body is truncated in this view; the
 * remainder (presumably the release of the busy IO) is outside the
 * visible range.
 */
void
efct_scsi_io_complete(struct efct_io *io)
{
	if (io->io_free) {
		/* Completion arrived for an IO no longer marked busy */
		efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
			      io->tag);
		return;
	}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.