/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. *
*******************************************************************/ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/unaligned.h> #include <linux/t10-pi.h> #include <linux/crc-t10dif.h> #include <linux/blk-cgroup.h> #include <net/checksum.h>
/** * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. * @phba: Pointer to HBA object. * @lpfc_cmd: lpfc scsi command object pointer. * * This function is called from the lpfc_prep_task_mgmt_cmd function to * set the last bit in the response sge entry.
**/ staticvoid
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{ struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; if (sgl) {
sgl += 1;
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
}
}
/** * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread * @phba: The Hba for which this call is being executed. * * This routine is called when there is resource error in driver or firmware. * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine * posts at most 1 event each second. This routine wakes up worker thread of * @phba to process WORKER_RAM_DOWN_EVENT event. * * This routine should be called with no lock held.
**/ void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{ unsignedlong flags;
uint32_t evt_posted; unsignedlong expires;
/* NOTE(review): this body looks truncated -- 'evt_posted' is read below
 * without ever being assigned, and 'flags'/'expires' are declared but
 * unused.  The hbalock-protected rate-limiting and work_port_events
 * logic that should set 'evt_posted' appears to be missing; restore it
 * from the upstream driver before relying on this routine.
 */
if (!evt_posted)
lpfc_worker_wake_up(phba); return;
}
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * Worker-thread handler for the WORKER_RAMP_DOWN_QUEUE event.  Lowers the
 * queue depth of every scsi device on every vport of @phba by the number
 * of resource errors recorded, then clears the global error counter.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long depth;
	unsigned long rsrc_errors;
	int i;

	rsrc_errors = atomic_read(&phba->num_rsrc_err);

	/*
	 * The error counter is global per driver instance.  If another
	 * handler already consumed this error event, just exit.
	 */
	if (!rsrc_errors)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vports && vports[i]; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Never ramp below a depth of one. */
				if (rsrc_errors >= sdev->queue_depth)
					depth = 1;
				else
					depth = sdev->queue_depth -
						rsrc_errors;
				scsi_change_queue_depth(sdev, depth);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
}
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * Walks every vport on @phba and calls fc_remote_port_delete() on the
 * fc_rport of each scsi device, putting the SCSI hosts into the blocked
 * state.  Invoked from EEH handling when the device's PCI slot has been
 * permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vports && vports[i]; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
/** * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec * @vport: The virtual port for which this call being executed. * @num_to_alloc: The requested number of buffers to allocate. * * This routine allocates a scsi buffer for device with SLI-3 interface spec, * the scsi buffer contains all the necessary information needed to initiate * a SCSI I/O. The non-DMAable buffer region contains information to build * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, * and the initial BPL. In addition to allocating memory, the FCP CMND and * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. * * Return codes: * int - number of scsi buffers that were allocated. * 0 = failure, less than num_to_alloc is a partial failure.
**/ staticint
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{ struct lpfc_hba *phba = vport->phba; struct lpfc_io_buf *psb; struct ulp_bde64 *bpl;
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_sgl;
uint16_t iotag; int bcnt, bpl_size;
/* NOTE(review): this body looks truncated -- 'bpl', 'pdma_phys_fcp_cmd',
 * 'pdma_phys_fcp_rsp' and 'pdma_phys_sgl' are read below without ever
 * being assigned, and 'iotag'/'bpl_size' are unused.  The psb field
 * setup (fcp_cmnd/fcp_rsp/dma_sgl pointers, physical address math) and
 * the iotag allocation appear to be missing; compare with upstream.
 */
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL); if (!psb) break;
/* * Get memory from the pci pool to map the virt space to pci * bus space for an I/O. The DMA buffer includes space for the * struct fcp_cmnd, struct fcp_rsp and the number of bde's * necessary to support the sg_tablesize.
*/
psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle); if (!psb->data) {
kfree(psb); break;
}
/* * The first two bdes are the FCP_CMD and FCP_RSP. The balance * are sg list bdes. Initialize the first two and leave the * rest for queuecommand.
*/
bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
/* Setup the physical region for the FCP RSP */
bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
/* * Since the IOCB for the FCP I/O is built into this * lpfc_scsi_buf, initialize it with all known data now.
*/
iocb = &psb->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0; if ((phba->sli_rev == 3) &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { /* fill in immediate fcp command BDE */
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
unsli3.fcp_ext.icd);
iocb->un.fcpi64.bdl.addrHigh = 0;
iocb->ulpBdeCount = 0;
iocb->ulpLe = 0; /* fill in response BDE */
iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
BUFF_TYPE_BDE_64;
iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = sizeof(struct fcp_rsp);
iocb->unsli3.fcp_ext.rbde.addrLow =
putPaddrLow(pdma_phys_fcp_rsp);
iocb->unsli3.fcp_ext.rbde.addrHigh =
putPaddrHigh(pdma_phys_fcp_rsp);
} else {
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
iocb->un.fcpi64.bdl.bdeSize =
(2 * sizeof(struct ulp_bde64));
iocb->un.fcpi64.bdl.addrLow =
putPaddrLow(pdma_phys_sgl);
iocb->un.fcpi64.bdl.addrHigh =
putPaddrHigh(pdma_phys_sgl);
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
}
iocb->ulpClass = CLASS3;
psb->status = IOSTAT_SUCCESS; /* Put it back into the SCSI buffer list */
psb->cur_iocbq.io_buf = psb;
spin_lock_init(&psb->buf_lock);
lpfc_release_scsi_buf_s3(phba, psb);
}
return bcnt;
}
/** * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport * @vport: pointer to lpfc vport data structure. * * This routine is invoked by the vport cleanup for deletions and the cleanup * for an ndlp on removal.
**/ void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{ struct lpfc_hba *phba = vport->phba; struct lpfc_io_buf *psb, *next_psb; struct lpfc_sli4_hdw_queue *qp; unsignedlong iflag = 0; int idx;
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return;
/* may be called before queues established if hba_setup fails */ if (!phba->sli4_hba.hdwq) return;
/* NOTE(review): everything below this point appears to be fragments of
 * one or more OTHER routines (completion/abort handling that references
 * 'cmd', 'rrq_empty', 'offline', 'iocbq' -- none of which are declared
 * here) spliced onto this function by a bad extraction.  The real body
 * of lpfc_sli4_vport_delete_fcp_xri_aborted (the hdwq abts list walk)
 * is missing; restore from upstream before use.
 */
/* The sdev is not guaranteed to be valid post * scsi_done upcall.
*/ if (cmd)
scsi_done(cmd);
/* * We expect there is an abort thread waiting * for command completion wake up the thread.
*/
spin_lock_irqsave(&psb->buf_lock, iflag);
psb->cur_iocbq.cmd_flag &=
~LPFC_DRIVER_ABORTED; if (psb->waitq)
wake_up(psb->waitq);
spin_unlock_irqrestore(&psb->buf_lock, iflag);
}
lpfc_release_scsi_buf_s4(phba, psb); if (rrq_empty)
lpfc_worker_wake_up(phba); if (!offline) return;
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_io_buf_list_lock); continue;
}
}
spin_unlock(&qp->abts_io_buf_list_lock); if (!offline) { for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
/** * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA * @phba: The HBA for which this call is being executed. * @ndlp: pointer to a node-list data structure. * @cmnd: Pointer to scsi_cmnd data structure. * * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list * and returns to caller. * * Return codes: * NULL - Error * Pointer to lpfc_scsi_buf - Success
**/ staticstruct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct scsi_cmnd *cmnd)
{ struct lpfc_io_buf *lpfc_cmd = NULL; struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; unsignedlong iflag = 0;
/* NOTE(review): the locked list-removal logic that should populate
 * 'lpfc_cmd' from the get list is missing here, so this function as
 * shown always returns NULL ('scsi_buf_list_get' and 'iflag' unused).
 */
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
} return lpfc_cmd;
} /** * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA * @phba: The HBA for which this call is being executed. * @ndlp: pointer to a node-list data structure. * @cmnd: Pointer to scsi_cmnd data structure. * * This routine removes a scsi buffer from head of @hdwq io_buf_list * and returns to caller. * * Return codes: * NULL - Error * Pointer to lpfc_scsi_buf - Success
**/ staticstruct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, struct scsi_cmnd *cmnd)
{ struct lpfc_io_buf *lpfc_cmd; struct lpfc_sli4_hdw_queue *qp; struct sli4_sge_le *sgl;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
uint32_t cpu, idx; int tag; struct fcp_cmd_rsp_buf *tmp = NULL;
cpu = raw_smp_processor_id(); if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
idx = blk_mq_unique_tag_to_hwq(tag);
} else {
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
}
/* NOTE(review): the io-buffer acquisition step is missing here --
 * 'lpfc_cmd', 'qp' and 'tmp' are read below without ever being
 * assigned (tmp is still NULL at the dereference on the
 * fcp_cmd_rsp_dma_handle line).  Restore from upstream before use.
 */
/* * The first two SGEs are the FCP_CMD and FCP_RSP. * The balance are sg list bdes. Initialize the * first two and leave the rest for queuecommand.
*/
sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
bf_set_le32(lpfc_sli4_sge_last, sgl, 0); if (cmnd && cmnd->cmd_len > LPFC_FCP_CDB_LEN)
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd32)); else
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
sgl++;
/* Setup the physical region for the FCP RSP */
pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd32);
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
bf_set_le32(lpfc_sli4_sge_last, sgl, 1);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
} return lpfc_cmd;
} /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA * @phba: The HBA for which this call is being executed. * @ndlp: pointer to a node-list data structure. * @cmnd: Pointer to scsi_cmnd data structure. * * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list * and returns to caller. * * Return codes: * NULL - Error * Pointer to lpfc_scsi_buf - Success
**/
static struct lpfc_io_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	/* Dispatch to the SLI-rev specific getter installed on the hba. */
	return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}
/** * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * * This routine releases @psb scsi buffer by adding it to tail of @phba * lpfc_scsi_buf_list list.
**/ staticvoid
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{ unsignedlong iflag = 0;
/* NOTE(review): body truncated -- the locked list-add that returns @psb
 * to the put list is missing ('iflag' unused, function never closed).
 */
/** * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * * This routine releases @psb scsi buffer by adding it to tail of @hdwq * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted.
**/ staticvoid
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{ struct lpfc_sli4_hdw_queue *qp; unsignedlong iflag = 0;
/* NOTE(review): body truncated -- the abort-state check and the hdwq
 * list handling are missing ('qp'/'iflag' unused, function never closed).
 */
/** * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * * This routine releases @psb scsi buffer by adding it to tail of @phba * lpfc_scsi_buf_list list.
**/ staticvoid
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{ if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
atomic_dec(&psb->ndlp->cmd_pending);
/* NOTE(review): tail truncated -- the flag clear and the dispatch to the
 * SLI-rev specific release routine are missing; function never closed.
 */
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * Copies the entire FCP command from @fcp_cmnd into @data one 32-bit
 * word at a time, byte-swapping each word to big endian (wire) order.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	uint32_t *dst = (uint32_t *)data;
	uint32_t *src = (uint32_t *)fcp_cmnd;
	int off = 0;
	int w = 0;

	/* Walk sizeof(struct fcp_cmnd) bytes, a word per iteration. */
	while (off < sizeof(struct fcp_cmnd)) {
		dst[w] = cpu_to_be32(src[w]);
		off += sizeof(uint32_t);
		w++;
	}
}
/** * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This routine does the pci dma mapping for scatter-gather list of scsi cmnd * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans * through sg elements and format the bde. This routine also initializes all * IOCB fields which are dependent on scsi command request buffer. * * Return codes: * 1 - Error * 0 - Success
**/ staticint
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
dma_addr_t physaddr;
uint32_t num_bde = 0; int nseg, datadir = scsi_cmnd->sc_data_direction;
/* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. Start the lpfc command prep by * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry.
*/
bpl += 2; if (scsi_sg_count(scsi_cmnd)) { /* * The driver stores the segment count returned from dma_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU.
*/
nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
scsi_sg_count(scsi_cmnd), datadir); if (unlikely(!nseg)) return 1;
lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9064 BLKGRD: %s: Too many sg segments" " from dma_map_sg. Config %d, seg_cnt" " %d\n", __func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd); return 2;
}
/* * The driver established a maximum scatter-gather segment count * during probe that limits the number of sg elements in any * single scsi command. Just run through the seg_cnt and format * the bde's. * When using SLI-3 the driver will try to fit all the BDEs into * the IOCB. If it can't then the BDEs get added to a BPL as it * does for SLI-2 mode.
*/
scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel); if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
!(iocbq->cmd_flag & DSS_SECURITY_OP) &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
data_bde->addrLow = putPaddrLow(physaddr);
data_bde->addrHigh = putPaddrHigh(physaddr);
data_bde++;
} else {
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = sg_dma_len(sgel);
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl->addrLow =
le32_to_cpu(putPaddrLow(physaddr));
bpl->addrHigh =
le32_to_cpu(putPaddrHigh(physaddr));
bpl++;
}
}
}
/* * Finish initializing those IOCB fields that are dependent on the * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is * explicitly reinitialized and for SLI-3 the extended bde count is * explicitly reinitialized since all iocb memory resources are reused.
*/ if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
!(iocbq->cmd_flag & DSS_SECURITY_OP)) { if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { /* * The extended IOCB format can only fit 3 BDE or a BPL. * This I/O has more than 3 BDE so the 1st data bde will * be a BPL that is filled in here.
*/
physaddr = lpfc_cmd->dma_handle;
data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
data_bde->tus.f.bdeSize = (num_bde * sizeof(struct ulp_bde64));
physaddr += (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
(2 * sizeof(struct ulp_bde64)));
data_bde->addrHigh = putPaddrHigh(physaddr);
data_bde->addrLow = putPaddrLow(physaddr); /* ebde count includes the response bde and data bpl */
iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
} else { /* ebde count includes the response bde and data bdes */
iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
}
} else {
iocb_cmd->un.fcpi64.bdl.bdeSize =
((num_bde + 2) * sizeof(struct ulp_bde64));
iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
}
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
/* * Due to difference in data length between DIF/non-DIF paths, * we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); return 0;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/* Return BG_ERR_INIT if error injection is detected by Initiator */ #define BG_ERR_INIT 0x1 /* Return BG_ERR_TGT if error injection is detected by Target */ #define BG_ERR_TGT 0x2 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ #define BG_ERR_SWAP 0x10 /* * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for * error injection
*/ #define BG_ERR_CHECK 0x20
/** * lpfc_bg_err_inject - Determine if we should inject an error * @phba: The Hba for which this call is being executed. * @sc: The SCSI command to examine * @reftag: (out) BlockGuard reference tag for transmitted data * @apptag: (out) BlockGuard application tag for transmitted data * @new_guard: (in) Value to replace CRC with if needed * * Returns BG_ERR_* bit mask or 0 if request ignored
**/ staticint
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{ struct scatterlist *sgpe; /* s/g prot entry */ struct lpfc_io_buf *lpfc_cmd = NULL; struct scsi_dif_tuple *src = NULL; struct lpfc_nodelist *ndlp; struct lpfc_rport_data *rdata;
uint32_t op = scsi_get_prot_op(sc);
uint32_t blksize;
uint32_t numblks;
u32 lba; int rc = 0; int blockoff = 0;
/* NOTE(review): this function looks truncated -- 'lba' and 'sgpe' are
 * read below without ever being assigned (upstream derives them from the
 * command's protection ref tag and protection sg list), several switch
 * cases are missing their trailing log/break/closing braces, and the
 * final 'return rc;' plus the guard-read tail never appear.  Restore
 * from the upstream driver before relying on this debugfs path.
 */
/* First check if we need to match the LBA */ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
blksize = scsi_prot_interval(sc);
numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
/* Make sure we have the right LBA if one is specified */ if (phba->lpfc_injerr_lba < (u64)lba ||
(phba->lpfc_injerr_lba >= (u64)(lba + numblks))) return 0; if (sgpe) {
blockoff = phba->lpfc_injerr_lba - (u64)lba;
numblks = sg_dma_len(sgpe) / sizeof(struct scsi_dif_tuple); if (numblks < blockoff)
blockoff = numblks;
}
}
/* Next check if we need to match the remote NPortID or WWPN */
rdata = lpfc_rport_data_from_scsi_device(sc->device); if (rdata && rdata->pnode) {
ndlp = rdata->pnode;
/* Make sure we have the right NPortID if one is specified */ if (phba->lpfc_injerr_nportid &&
(phba->lpfc_injerr_nportid != ndlp->nlp_DID)) return 0;
/* * Make sure we have the right WWPN if one is specified. * wwn[0] should be a non-zero NAA in a good WWPN.
*/ if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name)) != 0)) return 0;
}
/* Setup a ptr to the protection data if the SCSI host provides it */ if (sgpe) {
src = (struct scsi_dif_tuple *)sg_virt(sgpe);
src += blockoff;
lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
}
/* Should we change the Reference Tag */ if (reftag) { if (phba->lpfc_injerr_wref_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: if (src) { /* * For WRITE_PASS, force the error * to be sent on the wire. It should * be detected by the Target. * If blockoff != 0 error will be * inserted in middle of the IO.
*/
/* * Save the old ref_tag so we can * restore it on completion.
*/ if (lpfc_cmd) {
lpfc_cmd->prot_data_type =
LPFC_INJERR_REFTAG;
lpfc_cmd->prot_data_segment =
src;
lpfc_cmd->prot_data =
src->ref_tag;
}
src->ref_tag = cpu_to_be32(0xDEADBEEF);
phba->lpfc_injerr_wref_cnt--; if (phba->lpfc_injerr_wref_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_TGT | BG_ERR_CHECK;
break;
}
fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the error * to be sent on the wire. It should be * detected by the Target.
*/ /* DEADBEEF will be the reftag on the wire */
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_wref_cnt--; if (phba->lpfc_injerr_wref_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_TGT | BG_ERR_CHECK;
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9078 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsignedlong)lba); break; case SCSI_PROT_WRITE_STRIP: /* * For WRITE_STRIP and WRITE_PASS, * force the error on data * being copied from SLI-Host to SLI-Port.
*/
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_wref_cnt--; if (phba->lpfc_injerr_wref_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9077 BLKGRD: Injecting reftag error: " "write lba x%lx\n", (unsignedlong)lba); break;
}
} if (phba->lpfc_injerr_rref_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* * For READ_STRIP and READ_PASS, force the * error on data being read off the wire. It * should force an IO error to the driver.
*/
*reftag = 0xDEADBEEF;
phba->lpfc_injerr_rref_cnt--; if (phba->lpfc_injerr_rref_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_INIT;
/* NOTE(review): the log/break and the closing braces of the read-reftag
 * switch are missing here before the apptag section begins.
 */
/* Should we change the Application Tag */ if (apptag) { if (phba->lpfc_injerr_wapp_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS: if (src) { /* * For WRITE_PASS, force the error * to be sent on the wire. It should * be detected by the Target. * If blockoff != 0 error will be * inserted in middle of the IO.
*/
/* * Save the old app_tag so we can * restore it on completion.
*/ if (lpfc_cmd) {
lpfc_cmd->prot_data_type =
LPFC_INJERR_APPTAG;
lpfc_cmd->prot_data_segment =
src;
lpfc_cmd->prot_data =
src->app_tag;
}
src->app_tag = cpu_to_be16(0xDEAD);
phba->lpfc_injerr_wapp_cnt--; if (phba->lpfc_injerr_wapp_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_TGT | BG_ERR_CHECK; break;
}
fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the * error to be sent on the wire. It should be * detected by the Target.
*/ /* DEAD will be the apptag on the wire */
*apptag = 0xDEAD;
phba->lpfc_injerr_wapp_cnt--; if (phba->lpfc_injerr_wapp_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_TGT | BG_ERR_CHECK;
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0813 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsignedlong)lba); break; case SCSI_PROT_WRITE_STRIP: /* * For WRITE_STRIP and WRITE_PASS, * force the error on data * being copied from SLI-Host to SLI-Port.
*/
*apptag = 0xDEAD;
phba->lpfc_injerr_wapp_cnt--; if (phba->lpfc_injerr_wapp_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_INIT;
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0812 BLKGRD: Injecting apptag error: " "write lba x%lx\n", (unsignedlong)lba); break;
}
} if (phba->lpfc_injerr_rapp_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* * For READ_STRIP and READ_PASS, force the * error on data being read off the wire. It * should force an IO error to the driver.
*/
*apptag = 0xDEAD;
phba->lpfc_injerr_rapp_cnt--; if (phba->lpfc_injerr_rapp_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_INIT;
/* NOTE(review): the log/break and the closing braces of the read-apptag
 * switch are missing here before the guard section begins.
 */
/* Should we change the Guard Tag */ if (new_guard) { if (phba->lpfc_injerr_wgrd_cnt) { switch (op) { case SCSI_PROT_WRITE_PASS:
rc = BG_ERR_CHECK;
fallthrough;
case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the * error to be sent on the wire. It should be * detected by the Target.
*/
phba->lpfc_injerr_wgrd_cnt--; if (phba->lpfc_injerr_wgrd_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc |= BG_ERR_TGT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0817 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsignedlong)lba); break; case SCSI_PROT_WRITE_STRIP: /* * For WRITE_STRIP and WRITE_PASS, * force the error on data * being copied from SLI-Host to SLI-Port.
*/
phba->lpfc_injerr_wgrd_cnt--; if (phba->lpfc_injerr_wgrd_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0816 BLKGRD: Injecting guard error: " "write lba x%lx\n", (unsignedlong)lba); break;
}
} if (phba->lpfc_injerr_rgrd_cnt) { switch (op) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_READ_STRIP: case SCSI_PROT_READ_PASS: /* * For READ_STRIP and READ_PASS, force the * error on data being read off the wire. It * should force an IO error to the driver.
*/
phba->lpfc_injerr_rgrd_cnt--; if (phba->lpfc_injerr_rgrd_cnt == 0) {
phba->lpfc_injerr_nportid = 0;
phba->lpfc_injerr_lba =
LPFC_INJERR_LBA_OFF;
memset(&phba->lpfc_injerr_wwpn,
0, sizeof(struct lpfc_name));
}
rc = BG_ERR_INIT | BG_ERR_SWAP; /* Signals the caller to swap CRC->CSUM */
/* NOTE(review): the function tail (log/break, closing braces,
 * 'return rc;' and the matching #endif) is missing from this chunk.
 */
/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		      uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		/* Host-side guard tag is an IP checksum. */
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	} else {
		/* Host-side guard tag is a T10 CRC. */
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		    uint8_t *txop, uint8_t *rxop)
{
	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;
		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;
		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return 0;
}
#endif
/** * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data * @phba: The Hba for which this call is being executed. * @sc: pointer to scsi command we're working on * @bpl: pointer to buffer list for protection groups * @datasegcnt: number of segments of data that have been dma mapped * * This function sets up BPL buffer list for protection groups of * type LPFC_PG_TYPE_NO_DIF * * This is usually used when the HBA is instructed to generate * DIFs and insert them into data stream (or strip DIF from * incoming data stream) * * The buffer list consists of just one protection group described * below: * +-------------------------+ * start of prot group --> | PDE_5 | * +-------------------------+ * | PDE_6 | * +-------------------------+ * | Data BDE | * +-------------------------+ * |more Data BDE's ... (opt)| * +-------------------------+ * * * Note: Data s/g buffers have been dma mapped * * Returns the number of BDEs added to the BPL.
**/ staticint
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct ulp_bde64 *bpl, int datasegcnt)
{ struct scatterlist *sgde = NULL; /* s/g data entry */ struct lpfc_pde5 *pde5 = NULL; struct lpfc_pde6 *pde6 = NULL;
dma_addr_t physaddr; int i = 0, num_bde = 0, status; int datadir = sc->sc_data_direction; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc; #endif
uint32_t checking = 1;
uint32_t reftag;
uint8_t txop, rxop;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) goto out;
/* extract some info from the scsi command for pde*/
reftag = scsi_prot_ref_tag(sc);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { if (rc & BG_ERR_SWAP)
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); if (rc & BG_ERR_CHECK)
checking = 0;
} #endif
/* setup PDE5 with what we have */
pde5 = (struct lpfc_pde5 *) bpl;
memset(pde5, 0, sizeof(struct lpfc_pde5));
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
/* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
pde5->reftag = cpu_to_le32(reftag);
/* setup PDE6 with the rest of the info */
/* NOTE(review): the advance past PDE5 and the assignment of 'pde6' to
 * the next BPL entry are missing here -- 'pde6' is still NULL when it
 * is dereferenced by the memset below.  Restore from upstream.
 */
memset(pde6, 0, sizeof(struct lpfc_pde6));
bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
bf_set(pde6_optx, pde6, txop);
bf_set(pde6_oprx, pde6, rxop);
/* * We only need to check the data on READs, for WRITEs * protection data is automatically generated, not checked.
*/ if (datadir == DMA_FROM_DEVICE) { if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(pde6_ce, pde6, checking); else
bf_set(pde6_ce, pde6, 0);
/* NOTE(review): the ref-check handling and the closing brace of the
 * DMA_FROM_DEVICE block appear to be missing here.
 */
/* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
pde6->word1 = cpu_to_le32(pde6->word1);
pde6->word2 = cpu_to_le32(pde6->word2);
/* advance bpl and increment bde count */
num_bde++;
bpl++;
/* assumption: caller has already run dma_map_sg on command data */
scsi_for_each_sg(sc, sgde, datasegcnt, i) {
physaddr = sg_dma_address(sgde);
bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
bpl->tus.f.bdeSize = sg_dma_len(sgde); if (datadir == DMA_TO_DEVICE)
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; else
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
num_bde++;
}
out: return num_bde;
}
/** * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data * @phba: The Hba for which this call is being executed. * @sc: pointer to scsi command we're working on * @bpl: pointer to buffer list for protection groups * @datacnt: number of segments of data that have been dma mapped * @protcnt: number of segment of protection data that have been dma mapped * * This function sets up BPL buffer list for protection groups of * type LPFC_PG_TYPE_DIF * * This is usually used when DIFs are in their own buffers, * separate from the data. The HBA can then by instructed * to place the DIFs in the outgoing stream. For read operations, * The HBA could extract the DIFs and place it in DIF buffers. * * The buffer list for this type consists of one or more of the * protection groups described below: * +-------------------------+ * start of first prot group --> | PDE_5 | * +-------------------------+ * | PDE_6 | * +-------------------------+ * | PDE_7 (Prot BDE) | * +-------------------------+ * | Data BDE | * +-------------------------+ * |more Data BDE's ... (opt)| * +-------------------------+ * start of new prot group --> | PDE_5 | * +-------------------------+ * | ... | * +-------------------------+ * * Note: It is assumed that both data and protection s/g buffers have been * mapped for DMA * * Returns the number of BDEs added to the BPL.
**/ staticint
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct ulp_bde64 *bpl, int datacnt, int protcnt)
{ struct scatterlist *sgde = NULL; /* s/g data entry */ struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct lpfc_pde5 *pde5 = NULL; struct lpfc_pde6 *pde6 = NULL; struct lpfc_pde7 *pde7 = NULL;
dma_addr_t dataphysaddr, protphysaddr; unsignedshort curr_prot = 0; unsignedint split_offset; unsignedint protgroup_len, protgroup_offset = 0, protgroup_remainder; unsignedint protgrp_blks, protgrp_bytes; unsignedint remainder, subtotal; int status; int datadir = sc->sc_data_direction; unsignedchar pgdone = 0, alldone = 0; unsigned blksize; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc; #endif
uint32_t checking = 1;
uint32_t reftag;
uint8_t txop, rxop; int num_bde = 0;
/* setup PDE6 with the rest of the info */
memset(pde6, 0, sizeof(struct lpfc_pde6));
bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
bf_set(pde6_optx, pde6, txop);
bf_set(pde6_oprx, pde6, rxop);
/* check if this pde is crossing the 4K boundary; if so split */ if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
protgroup_offset += protgroup_remainder;
protgrp_blks = protgroup_remainder / 8;
protgrp_bytes = protgrp_blks * blksize;
} else {
protgroup_offset = 0;
curr_prot++;
}
num_bde++;
/* setup BDE's for data blocks associated with DIF data */
pgdone = 0;
subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { /* Check to see if we ran out of space */ if (num_bde >= phba->cfg_total_seg_cnt) return num_bde + 1;
/* Move to the next s/g segment if possible */
sgde = sg_next(sgde);
}
if (protgroup_offset) { /* update the reference tag */
reftag += protgrp_blks;
bpl++; continue;
}
/* are we done ? */ if (curr_prot == protcnt) {
alldone = 1;
} elseif (curr_prot < protcnt) { /* advance to next prot buffer */
sgpe = sg_next(sgpe);
bpl++;
/* update the reference tag */
reftag += protgrp_blks;
} else { /* if we're here, we have a bug */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "9054 BLKGRD: bug in %s\n", __func__);
}
} while (!alldone);
out:
return num_bde;
}
/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 *
 * NOTE(review): the body below appears to have lost statements during an
 * extraction/merge: sgl_xtra is dereferenced while still NULL, the
 * DMA_FROM_DEVICE conditional is never closed before the endianness
 * conversion, and the per-segment data-SGE population inside the for
 * loop is absent.  Compare against upstream lpfc_scsi.c before trusting
 * this text; comments below only describe what is visibly present.
 **/
static uint32_t
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datasegcnt, struct lpfc_io_buf *lpfc_cmd)
{ struct scatterlist *sgde = NULL; /* s/g data entry */ struct sli4_sge_diseed *diseed = NULL;
dma_addr_t physaddr; int i = 0, status;
uint32_t reftag, num_sge = 0;
uint8_t txop, rxop; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc; #endif
uint32_t checking = 1;
uint32_t dma_len;
uint32_t dma_offset = 0; struct sli4_hybrid_sgl *sgl_xtra = NULL; int j; bool lsp_just_set = false;
/* Translate the SCSI protection op into BlockGuard tx/rx opcodes. */
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) goto out;
/* extract some info from the scsi command for pde*/
reftag = scsi_prot_ref_tag(sc);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/* Optional error injection: may swap the opcodes or disable checking. */
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { if (rc & BG_ERR_SWAP)
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); if (rc & BG_ERR_CHECK)
checking = 0;
} #endif
/* setup DISEED with what we have */
diseed = (struct sli4_sge_diseed *) sgl;
memset(diseed, 0, sizeof(struct sli4_sge_diseed));
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
/*
 * NOTE(review): reftag/txop/rxop are computed above but never written
 * into the DISEED in the visible text — those statements appear lost.
 *
 * We only need to check the data on READs, for WRITEs
 * protection data is automatically generated, not checked.
 */
/* NOTE(review): this if-block is never closed below — lines missing. */
if (sc->sc_data_direction == DMA_FROM_DEVICE) { if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); else
bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
/* Endianness conversion if necessary for DISEED */
diseed->word2 = cpu_to_le32(diseed->word2);
diseed->word3 = cpu_to_le32(diseed->word3);
/* advance bpl and increment sge count */
num_sge++;
sgl++;
/* assumption: caller has already run dma_map_sg on command data */
sgde = scsi_sglist(sc);
/* j starts at 3: DISEED occupies the first slot(s) of the SGL page. */
j = 3; for (i = 0; i < datasegcnt; i++) { /* clear it */
sgl->word2 = 0;
/* do we need to expand the segment */ if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
((datasegcnt - 1) != i)) { /* set LSP type */
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
/*
 * NOTE(review): sgl_xtra is still NULL here — the
 * lpfc_get_sgl_per_hdwq() call that should assign it (and the
 * data-SGE population for the non-LSP path) appears to have
 * been dropped from this text.
 */
sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
/* Re-process the current segment on the newly chained SGL page. */
i = i - 1;
lsp_just_set = true;
}
j++;
}
out: return num_sge;
}
/** * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data * @phba: The Hba for which this call is being executed. * @sc: pointer to scsi command we're working on * @sgl: pointer to buffer list for protection groups * @datacnt: number of segments of data that have been dma mapped * @protcnt: number of segment of protection data that have been dma mapped * @lpfc_cmd: lpfc scsi command object pointer. * * This function sets up SGL buffer list for protection groups of * type LPFC_PG_TYPE_DIF * * This is usually used when DIFs are in their own buffers, * separate from the data. The HBA can then by instructed * to place the DIFs in the outgoing stream. For read operations, * The HBA could extract the DIFs and place it in DIF buffers. * * The buffer list for this type consists of one or more of the * protection groups described below: * +-------------------------+ * start of first prot group --> | DISEED | * +-------------------------+ * | DIF (Prot SGE) | * +-------------------------+ * | Data SGE | * +-------------------------+ * |more Data SGE's ... (opt)| * +-------------------------+ * start of new prot group --> | DISEED | * +-------------------------+ * | ... | * +-------------------------+ * * Note: It is assumed that both data and protection s/g buffers have been * mapped for DMA * * Returns the number of SGEs added to the SGL.
**/ static uint32_t
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datacnt, int protcnt, struct lpfc_io_buf *lpfc_cmd)
{ struct scatterlist *sgde = NULL; /* s/g data entry */ struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct sli4_sge_diseed *diseed = NULL;
dma_addr_t dataphysaddr, protphysaddr; unsignedshort curr_prot = 0; unsignedint split_offset; unsignedint protgroup_len, protgroup_offset = 0, protgroup_remainder; unsignedint protgrp_blks, protgrp_bytes; unsignedint remainder, subtotal; int status; unsignedchar pgdone = 0, alldone = 0; unsigned blksize;
uint32_t reftag;
uint8_t txop, rxop;
uint32_t dma_len; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc; #endif
uint32_t checking = 1;
uint32_t dma_offset = 0, num_sge = 0; int j = 2; struct sli4_hybrid_sgl *sgl_xtra = NULL;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) goto out;
/* extract some info from the scsi command */
blksize = scsi_prot_interval(sc);
reftag = scsi_prot_ref_tag(sc);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); if (rc) { if (rc & BG_ERR_SWAP)
lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); if (rc & BG_ERR_CHECK)
checking = 0;
} #endif
split_offset = 0; do { /* Check to see if we ran out of space */ if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
!(phba->cfg_xpsgl)) return num_sge + 3;
/* DISEED and DIF have to be together */ if (!((j + 1) % phba->border_sge_num) ||
!((j + 2) % phba->border_sge_num) ||
!((j + 3) % phba->border_sge_num)) {
sgl->word2 = 0;
/* set LSP type */
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
if (unlikely(!sgl_xtra)) { goto out;
} else {
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.35 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.