/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	struct device *dmadev = &fnic->pdev->dev;

	/* Unmap the device-private SGL, if one was DMA-mapped */
	if (io_req->sgl_list_pa)
		dma_unmap_single(dmadev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 DMA_TO_DEVICE);

	/* Undo the scsi_dma_map() of the command's data buffer */
	scsi_dma_unmap(sc);

	/* Return the SGL storage to its mempool; sgl_cnt == 0 means none */
	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);

	/* Unmap the sense buffer, if one was DMA-mapped */
	if (io_req->sense_buf_pa)
		dma_unmap_single(dmadev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */ staticint free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsignedint hwq)
{ /* if no Ack received from firmware, then nothing to clean */ if (!fnic->fw_ack_recd[hwq]) return 1;
/* * Update desc_available count based on number of freed descriptors * Account for wraparound
*/ if (wq->to_clean_index <= fnic->fw_ack_index[hwq])
wq->ring.desc_avail += (fnic->fw_ack_index[hwq]
- wq->to_clean_index + 1); else
wq->ring.desc_avail += (wq->ring.desc_count
- wq->to_clean_index
+ fnic->fw_ack_index[hwq] + 1);
/* * just bump clean index to ack_index+1 accounting for wraparound * this will essentially free up all descriptors between * to_clean_index and fw_ack_index, both inclusive
*/
wq->to_clean_index =
(fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count;
/* we have processed the acks received so far */
fnic->fw_ack_recd[hwq] = 0; return 0;
}
/*
 * NOTE(review): fragment — the enclosing function definition (apparently
 * the SCSI queuecommand path: fnic, iport, rport, rdd_data, tport, sc,
 * done, flags are declared before this chunk) is not visible here.
 * Code left byte-identical; comments only.
 */
/* iport not ready: complete the command immediately with DID_NO_CONNECT */
if (iport->state != FNIC_IPORT_STATE_READY) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "returning DID_NO_CONNECT for IO as iport state: %d\n",
iport->state);
sc->result = DID_NO_CONNECT << 16;
done(sc); return 0;
}
/* fc_remote_port_add() may have added the tport to * fc_transport but dd_data not yet set
*/
rdd_data = rport->dd_data;
/* dd_data stale or for another iport: re-resolve tport by FC ID */
tport = rdd_data->tport; if (!tport || (rdd_data->iport != iport)) {
FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "dd_data not yet set in SCSI for rport portid: 0x%x\n",
rport->port_id);
/* no tport for this FC ID: ask mid-layer to retry with DID_BUS_BUSY */
tport = fnic_find_tport_by_fcid(iport, rport->port_id); if (!tport) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n",
rport->port_id);
sc->result = DID_BUS_BUSY << 16;
done(sc); return 0;
}
/* Re-assign same params as in fnic_fdls_add_tport */
rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
rport->supported_classes =
FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; /* the dd_data is allocated by fctransport of size dd_fcrport_size */
/* link rport <-> tport both ways so later IOs skip this lookup */
rdd_data = rport->dd_data;
rdd_data->tport = tport;
rdd_data->iport = iport;
tport->rport = rport;
tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
}
/*
 * NOTE(review): fragment — continues the queuecommand path; the outer
 * "if (sg_count)" block opened at the end is closed beyond this chunk.
 * Code left byte-identical; comments only.
 */
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY; goto out;
}
memset(io_req, 0, sizeof(*io_req));
/* Map the data buffer */
/*
 * NOTE(review): this failure path frees io_req and jumps to "out"
 * without assigning ret — confirm ret's prior value is the intended
 * return code for a scsi_dma_map() failure.
 */
sg_count = scsi_dma_map(sc); if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
mempool_free(io_req, fnic->io_req_pool); goto out;
}
io_req->tport = tport; /* Determine the type of scatter/gather list we need */
io_req->sgl_cnt = sg_count;
/* large transfers use the MAX-sized SGL cache pool */
io_req->sgl_type = FNIC_SGL_CACHE_DFLT; if (sg_count > FNIC_DFLT_SG_DESC_CNT)
io_req->sgl_type = FNIC_SGL_CACHE_MAX;
if (sg_count) {
/* on SGL alloc failure, unwind the DMA mapping and io_req alloc */
io_req->sgl_list =
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
GFP_ATOMIC); if (!io_req->sgl_list) {
atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
scsi_dma_unmap(sc);
mempool_free(io_req, fnic->io_req_pool); goto out;
}
/*
 * NOTE(review): fragment of the firmware-reset completion handler —
 * hdr_status, ret, reset_stats, flags and the fnic_lock acquisition are
 * established before this chunk. Code left byte-identical; comments only.
 */
/* fnic should be in FC_TRANS_ETH_MODE */ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { /* Check status of reset completion */ if (!hdr_status) {
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "reset cmpl success\n"); /* Ready to send flogi out */
fnic->state = FNIC_IN_ETH_MODE;
} else {
/* reset failed: fall back to FC mode and record the failure */
FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "reset failed with header status: %s\n",
fnic_fcpio_status_to_str(hdr_status));
fnic->state = FNIC_IN_FC_MODE;
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Unexpected state while processing reset completion: %s\n",
fnic_state_to_str(fnic->state));
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
/* wake up any thread waiting for the firmware reset to complete */
if (fnic->fw_reset_done)
complete(fnic->fw_reset_done);
/* * If fnic is being removed, or fw reset failed * free the flogi frame. Else, send it out
*/ if (ret) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_free_txq(&fnic->tx_queue); goto reset_cmpl_handler_end;
}
/*
 * Check whether request_out lies inside the live window of the copy WQ,
 * i.e. within [to_clean_index, to_use_index) with ring wraparound
 * handled. Returns 1 when in range, 0 for a stale/out-of-range index.
 */
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index)
		/* Window does not wrap: plain half-open interval test */
		return request_out >= wq->to_clean_index &&
		       request_out < wq->to_use_index;

	/* Window wraps past the end of the ring */
	return request_out >= wq->to_clean_index ||
	       request_out < wq->to_use_index;
}
/*
 * NOTE(review): this function is truncated in this chunk — the spinlock
 * acquired below is released beyond the visible lines. Code left
 * byte-identical; comments only.
 */
/* * Mark that ack received and store the Ack index. If there are multiple * acks received before Tx thread cleans it up, the latest value will be * used which is correct behavior. This state should be in the copy Wq * instead of in the fnic
*/ staticinlinevoid fnic_fcpio_ack_handler(struct fnic *fnic, unsignedint cq_index, struct fcpio_fw_req *desc)
{ struct vnic_wq_copy *wq;
u16 request_out = desc->u.ack.request_out; unsignedlong flags;
u64 *ox_id_tag = (u64 *)(void *)desc; unsignedint wq_index = cq_index;
/* mark the ack state */
wq = &fnic->hw_copy_wq[cq_index];
spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags);
/*
 * NOTE(review): fragment of the ICMND completion handler — desc, io_req,
 * sc, fnic_stats, start_time and the wq_copy_lock acquisition are
 * established before this chunk. Code left byte-identical; comments only.
 */
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
fcpio_tag_id_dec(&ftag, &id);
icmnd_cmpl = &desc->u.icmnd_cmpl;
/* split the unique tag into per-hwq tag and hwq index */
mqtag = id;
tag = blk_mq_unique_tag_to_tag(mqtag);
hwq = blk_mq_unique_tag_to_hwq(mqtag);
WARN_ON_ONCE(!io_req); if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
shost_printk(KERN_ERR, fnic->host, "icmnd_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, sc); return;
}
start_time = io_req->start_time;
/* firmware completed the io */
io_req->io_completed = 1;
/* * if SCSI-ML has already issued abort on this command, * set completion of the IO. The abts path will clean it up
*/ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
/* * set the FNIC_IO_DONE so that this doesn't get * flagged as 'out of order' if it was not aborted
*/
fnic_priv(sc)->flags |= FNIC_IO_DONE;
fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); if(FCPIO_ABORTED == hdr_status)
fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
/*
 * NOTE(review): fragment — these are cases of a "switch (hdr_status)"
 * whose opening statement is not visible in this chunk. Each case maps a
 * firmware completion status to a SCSI mid-layer result and bumps the
 * matching statistic. Code left byte-identical; comments only.
 */
case FCPIO_TIMEOUT: /* request was timed out */
atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; break;
case FCPIO_ABORTED: /* request was aborted */
atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break;
/* mismatch also propagates the residual byte count to the mid-layer */
case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
scsi_set_resid(sc, icmnd_cmpl->residual);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break;
/* DID_REQUEUE asks the mid-layer to retry the command */
case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; break;
case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
atomic64_inc(&fnic_stats->io_stats.io_not_found);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break;
case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break;
case FCPIO_FW_ERR: /* request was terminated due fw error */
atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break;
case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break;
case FCPIO_INVALID_HEADER: /* header contains invalid data */ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ default:
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break;
}
/*
 * NOTE(review): fragments — the first part unlinks a completed command
 * from its io_req; the second (apparently the ITMF completion handler)
 * resolves sc/io_req from the completion tag. Enclosing definitions are
 * not visible in this chunk. Code left byte-identical; comments only.
 */
/* Break link with the SCSI command */
fnic_priv(sc)->io_req = NULL;
io_req->sc = NULL;
fnic_priv(sc)->flags |= FNIC_IO_DONE;
fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
/* If it is sg3utils allocated SC then tag_id * is max_tag_id and SC is retrieved from io_req
*/ if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
io_req = fnic->sw_copy_wq[hwq].io_req_table[tag]; if (io_req)
sc = io_req->sc;
} else {
/* normal path: the tag maps straight back to the SCSI command */
sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK);
}
WARN_ON_ONCE(!sc); if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
shost_printk(KERN_ERR, fnic->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), tag); return;
}
io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req); if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->host, "itmf_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), tag, sc); return;
}
/*
 * NOTE(review): fragments — ITMF abort/terminate completion handling
 * followed by what appears to be part of fnic_rport_exch_reset (the
 * "returntrue" paths suggest a bool-returning iterator callback).
 * Braces do not balance within this chunk. Code left byte-identical;
 * comments only.
 */
start_time = io_req->start_time;
if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { /* Abort and terminate completion of device reset req */ /* REVISIT : Add asserts about various flags */
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
hwq, mqtag, tag,
fnic_fcpio_status_to_str(hdr_status));
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
fnic_priv(sc)->abts_status = hdr_status;
/* wake any thread blocked on the abort completion */
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; if (io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} elseif (id & FNIC_TAG_ABORT) { /* Completion of abort cmd */
shost_printk(KERN_DEBUG, fnic->host, "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
hwq, mqtag, tag,
/* stats are split by whether this was an abort or a terminate */
fnic_fcpio_status_to_str(hdr_status)); switch (hdr_status) { case FCPIO_SUCCESS: break; case FCPIO_TIMEOUT: if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_fw_timeouts); else
atomic64_inc(
&term_stats->terminate_fw_timeouts); break; case FCPIO_ITMF_REJECTED:
FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "abort reject recd. id %d\n",
(int)(id & FNIC_TAG_MASK)); break; case FCPIO_IO_NOT_FOUND: if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_io_not_found); else
atomic64_inc(
&term_stats->terminate_io_not_found); break; default: if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_failures); else
atomic64_inc(
&term_stats->terminate_failures); break;
} if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) { /* This is a late completion. Ignore it */
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { /* * We will be here only when FW completes reset * without sending completions for outstanding ios.
*/
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; if (io_req && io_req->dr_done)
complete(io_req->dr_done); elseif (io_req && io_req->abts_done)
complete(io_req->abts_done);
/* * If there is a scsi_cmnd associated with this io_req, then * free the corresponding state
*/
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
hwq, abt_tag, fnic_priv(sc)->flags);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); returntrue;
}
/* * Found IO that is still pending with firmware and * belongs to rport that went away
*/ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); returntrue;
}
if (io_req->abts_done) {
shost_printk(KERN_ERR, fnic->host, "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
}
/*
 * NOTE(review): fragments — queuing an abort to firmware (rport exch
 * reset path) followed by the start of a terminate-rport-io routine.
 * Braces do not balance within this chunk. Code left byte-identical;
 * comments only.
 */
/* Queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req, hwq)) { /* * Revert the cmd state back to old state, if * it hasn't changed in between. This cmd will get * aborted later by scsi_eh, or cleaned up during * lun reset
*/
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
hwq, abt_tag, fnic_priv(sc)->flags); if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else {
/* abort queued: record whether it was a device-reset terminate */
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; else
fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
}
if (!rport) {
pr_err("rport is NULL\n"); return;
}
/* dd_data may outlive the tport; log which situation we are in */
rdd_data = rport->dd_data; if (rdd_data) {
tport = rdd_data->tport; if (!tport) {
pr_err( "term rport io called after tport is deleted. Returning 0x%8x\n",
rport->port_id);
} else {
pr_err( "term rport io called after tport is set 0x%8x\n",
rport->port_id);
pr_err( "tport maybe rediscovered\n");
/*
 * NOTE(review): this function is truncated in this chunk — its closing
 * brace and any further teardown are not visible. Code left
 * byte-identical; comments only.
 */
/* * FCP-SCSI specific handling for module unload *
*/ void fnic_scsi_unload(struct fnic *fnic)
{ unsignedlong flags;
/* * Mark state so that the workqueue thread stops forwarding * received frames and link events to the local port. ISR and * other threads that can queue work items will also stop * creating work items on the fnic workqueue
*/
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/* only reset firmware if fabric discovery ever got past INIT */
if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT)
fnic_scsi_fcpio_reset(fnic);
/*
 * NOTE(review): this function is truncated in this chunk — the body
 * continues (abort queuing, wait_pending, fnic_abort_cmd_end labels are
 * referenced but not visible). Also: flags is passed to
 * spin_unlock_irqrestore(&fnic->fnic_lock, ...) without a visible
 * matching spin_lock_irqsave on that lock — presumably taken in lines
 * elided from this chunk; confirm against the full source. Code left
 * byte-identical; comments only.
 */
/* * This function is exported to SCSI for sending abort cmnds. * A SCSI IO is represented by a io_req in the driver. * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
*/ int fnic_abort_cmd(struct scsi_cmnd *sc)
{ struct request *const rq = scsi_cmd_to_rq(sc); struct fnic_iport_s *iport; struct fnic_tport_s *tport; struct fnic *fnic; struct fnic_io_req *io_req = NULL; struct fc_rport *rport; struct rport_dd_data_s *rdd_data; unsignedlong flags; unsignedlong start_time = 0; int ret = SUCCESS;
u32 task_req = 0; struct scsi_lun fc_lun; struct fnic_stats *fnic_stats; struct abort_stats *abts_stats; struct terminate_stats *term_stats; enum fnic_ioreq_state old_ioreq_state; int mqtag; unsignedlong abt_issued_time;
uint16_t hwq = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
fnic = *((struct fnic **) shost_priv(sc->device->host));
spin_unlock_irqrestore(&fnic->fnic_lock, flags); /* * Avoid a race between SCSI issuing the abort and the device * completing the command. * * If the command is already completed by the fw cmpl code, * we just return SUCCESS from here. This means that the abort * succeeded. In the SCSI ML, since the timeout for command has * happened, the completion wont actually complete the command * and it will be considered as an aborted command * * .io_req will not be cleared except while holding io_req_lock.
*/
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
/* io_req already gone: the command completed before the abort ran */
io_req = fnic_priv(sc)->io_req; if (!io_req) {
ret = FAILED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto fnic_abort_cmd_end;
}
io_req->abts_done = &tm_done;
/* an abort is already in flight for this command: wait on it */
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto wait_pending;
}
FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
sc->cmnd[0], abt_issued_time); /* * Command is still pending, need to abort it * If the firmware completes the command after this point, * the completion wont be done till mid-layer, since abort * has already started.
*/
old_ioreq_state = fnic_priv(sc)->state;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.