int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail, "Set this to take full dump on MPI hang.");
int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit, "Enforce IOCB throttling, to avoid FW congestion. (default: 2) " "1: track usage per queue, 2: track usage per adapter");
staticint ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2, "Specify if Class 2 operations are supported from the very " "beginning. Default is 0 - class 2 not supported.");
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout, "Login timeout value in seconds.");
int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry, "Maximum number of command retries to a port that returns " "a PORT-DOWN status.");
int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice, "Option to enable PLOGI to devices that are not present after " "a Fabric scan. This is needed for several broken switches. " "Default is 0 - no PLOGI. 1 - perform PLOGI.");
int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount, "Specify an alternate value for the NVRAM login retry count.");
int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump, "Option to enable allocation of memory for a firmware dump " "during HBA initialization. Memory allocation requirements " "vary by ISP type. Default is 1 - allocate memory.");
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging, "Option to enable extended error logging,\n" "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n" "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n" "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n" "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n" "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n" "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n" "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n" "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n" "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n" "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n" "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n" "\t\t0x1e400000 - Preferred value for capturing essential " "debug information (equivalent to old " "ql2xextended_error_logging=1).\n" "\t\tDo LOGICAL OR of the value to enable more than one level");
int ql2xextended_error_logging_ktrace = 1;
module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging_ktrace, "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd, "Set to control shifting of command type processing " "based on total number of SG elements.");
int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable, "Enables FDMI registrations. " "0 - no FDMI registrations. " "1 - provide FDMI registrations (default).");
#define MAX_Q_DEPTH 64 staticint ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to set for each LUN. " "Default is 64.");
int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif, " Enable T10-CRC-DIF:\n" " Default is 2.\n" " 0 -- No DIF Support\n" " 1 -- Enable DIF for all types\n" " 2 -- Enable DIF for all types, except Type 0.\n");
#if (IS_ENABLED(CONFIG_NVME_FC)) int ql2xnvmeenable = 1; #else int ql2xnvmeenable; #endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable, "Enables NVME support. " "0 - no NVMe. Default is Y");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk, " Enable T10-CRC-DIF Error isolation by HBA:\n" " Default is 2.\n" " 0 -- Error isolation disabled\n" " 1 -- Error isolation enabled only for DIX Type 0\n" " 2 -- Error isolation enabled for all Types\n");
int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport, "Enable on demand multiple queue pairs support " "Default is 1 for supported. " "Set it to 0 to turn off mq qpair support.");
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin, "Option to specify location from which to load ISP firmware:.\n" " 2 -- load firmware via the request_firmware() (hotplug).\n" " interface.\n" " 1 -- load firmware from flash.\n" " 0 -- use default semantics.\n");
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr, "Option to specify scheme for request queue posting.\n" " 0 -- Regular doorbell.\n" " 1 -- CAMRAM doorbell (faster).\n");
int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable, "Enables GFF_ID checks of port type. " "Default is 0 - Do not use GFF_ID information.");
int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable, "Enables issue of TM IOCBs asynchronously via IOCB mechanism" "Default is 1 - Issue TM IOCBs via mailbox mechanism.");
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba, "Option to specify reset behaviour.\n" " 0 (Default) -- Reset on failure.\n" " 1 -- Do not reset on failure.\n");
uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun, "Defines the maximum LU number to register with the SCSI " "midlayer. Default is 65535.");
int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask, "Set the Minidump driver capture mask level. " "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins, "Number of extended Logins. " "0 (Default)- Disabled.");
int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld, "Number of target exchanges.");
int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg, "Number of initiator exchanges.");
int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts, "Allow FW to hold status IOCB until ABTS rsp received. " "0 (Default) Do not set fw option. " "1 - Set fw option to hold ABTS.");
int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio, "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ" "0 (Default). Do not move IOCBs" "1 - Move IOCBs.");
int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp, "Detect SFP range and set appropriate distance.\n" "1 (Default): Enable\n");
int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix, "Set to enable MSI or MSI-X interrupt mechanism.\n" " Default is 1, enable MSI-X interrupt mechanism.\n" " 0 -- enable traditional pin-based mechanism.\n" " 1 -- enable MSI-X interrupt mechanism.\n" " 2 -- enable MSI interrupt mechanism.\n");
int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels, "Reserve 1/2 of emergency exchanges for ELS.\n" " 0 (default): disabled");
staticint ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask, "Override DIF/DIX protection capabilities mask\n" "Default is 0 which sets protection mask based on " "capabilities reported by HBA firmware.\n");
staticint ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n" " 0 -- Let HBA firmware decide\n" " 1 -- Force T10 CRC\n" " 2 -- Force IP checksum\n");
int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, "Force using internal buffers for DIF information\n" "0 (Default). Based on check.\n" "1 Force using internal buffers\n");
int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan, "Send SmartSAN Management Attributes for FDMI Registration." " Default is 0 - No SmartSAN registration," " 1 - Register SmartSAN Management Attributes.");
int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable, "Enables RDP responses. " "0 - no RDP responses (default). " "1 - provide RDP responses."); int ql2xabts_wait_nvme = 1;
module_param(ql2xabts_wait_nvme, int, 0444);
MODULE_PARM_DESC(ql2xabts_wait_nvme, "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
static u32 ql2xdelay_before_pci_error_handling = 5;
module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling, "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
module_param(ql2xnvme_queues, uint, S_IRUGO);
MODULE_PARM_DESC(ql2xnvme_queues, "Number of NVMe Queues that can be configured.\n" "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n" "1 - Minimum number of queues supported\n" "8 - Default value");
int ql2xfc2target = 1;
module_param(ql2xfc2target, int, 0444);
MODULE_PARM_DESC(qla2xfc2target, "Enables FC2 Target support. " "0 - FC2 Target support is disabled. " "1 - FC2 Target support is enabled (default).");
staticinlinevoid
qla2x00_restart_timer(scsi_qla_host_t *vha, unsignedlong interval)
{ /* Currently used for 82XX only. */ if (vha->device_flags & DFLG_DEV_FAILED) {
ql_dbg(ql_dbg_timer, vha, 0x600d, "Device in a failed state, returning.\n"); return;
}
ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
GFP_KERNEL); if (!ha->req_q_map) {
ql_log(ql_log_fatal, vha, 0x003b, "Unable to allocate memory for request queue ptrs.\n"); goto fail_req_map;
}
ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
GFP_KERNEL); if (!ha->rsp_q_map) {
ql_log(ql_log_fatal, vha, 0x003c, "Unable to allocate memory for response queue ptrs.\n"); goto fail_rsp_map;
}
ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); if (ha->base_qpair == NULL) {
ql_log(ql_log_warn, vha, 0x00e0, "Failed to allocate base queue pair memory.\n"); goto fail_base_qpair;
}
qla_init_base_qpair(vha, req, rsp);
if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
GFP_KERNEL); if (!ha->queue_pair_map) {
ql_log(ql_log_fatal, vha, 0x0180, "Unable to allocate memory for queue pair ptrs.\n"); goto fail_qpair_map;
} if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
kfree(ha->queue_pair_map);
ha->queue_pair_map = NULL; goto fail_qpair_map;
}
}
/* * Make sure we record at least the request and response queue zero in * case we need to free them if part of the probe fails.
*/
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
set_bit(0, ha->rsp_qid_map);
set_bit(0, ha->req_qid_map); return 0;
if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */
qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) { struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */
qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
/* * Return target busy if we've received a non-zero retry_delay_timer * in a FCP_RSP.
*/ if (fcport->retry_delay_timestamp == 0) { /* retry delay not set */
} elseif (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0; else goto qc24_target_busy;
/* * Return target busy if we've received a non-zero retry_delay_timer * in a FCP_RSP.
*/ if (fcport->retry_delay_timestamp == 0) { /* retry delay not set */
} elseif (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0; else goto qc24_target_busy;
list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->deleted != QLA_SESS_DELETED) { /* session(s) may not be fully logged in * (ie fcport_count=0), but session * deletion thread(s) may be inflight.
*/
res = 0; break;
}
}
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return res;
}
/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
 * it has dependency on UNLOADING flag to stop device discovery
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
	u8 attempt = 0;

	/* Kick off session teardown, then poll for completion. */
	qla2x00_mark_all_devices_lost(vha);

	while (attempt++ < 10) {
		/* Each wait is bounded by one HZ tick; > 0 means done. */
		if (wait_event_timeout(vha->fcport_waitQ,
				       test_fcport_count(vha), HZ) > 0)
			break;
	}

	/* Drain any deletion work still queued on the adapter workqueue. */
	flush_workqueue(vha->hw->wq);
}
/*
 * qla2x00_wait_for_hba_ready
 *	Wait till the HBA is ready before doing driver unload
 *
 * Input:
 *	ha - pointer to host adapter structure
 *
 * Note:
 *	Does context switching-Release SPIN_LOCK
 *	(if any) before calling this routine.
 */
/* Fixed "staticvoid" (missing space) — was a compile error. */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
	    ha->flags.mbox_busy) ||
	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
		/* Stop waiting once the driver starts unloading. */
		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;
		msleep(1000);
	}
}
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{ int return_status; unsignedlong wait_reset; struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* * Abort will release the original Command/sp from FW. Let the * original command call scsi_done. In return, he will wakeup * this sleeping thread.
*/
rval = ha->isp_ops->abort_command(sp);
spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt]; if (!sp) continue; if (sp->type != SRB_SCSI_CMD) continue; if (vha->vp_idx != sp->vha->vp_idx) continue;
match = 0;
cmd = GET_CMD_SP(sp); switch (type) { case WAIT_HOST:
match = 1; break; case WAIT_TARGET: if (sp->fcport)
match = sp->fcport->d_id.b24 == t; else
match = 0; break; case WAIT_LUN: if (sp->fcport)
match = (sp->fcport->d_id.b24 == t &&
cmd->device->lun == l); else
match = 0; break;
} if (!match) continue;
/* * SRB_SCSI_CMD is still in the outstanding_cmds array. * it means scsi_done has not called. Wait for it to * clear from outstanding_cmds.
*/
msleep(ABORT_POLLING_PERIOD);
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
found = true;
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (!found) break;
}
if (wait_iter == -1)
status = QLA_FUNCTION_FAILED;
return status;
}
/*
 * Wait for pending commands on the given nexus (host/target/LUN, selected
 * by @type) across the base queue pair and every allocated queue pair.
 * Returns QLA_SUCCESS when all matching commands have completed, or the
 * first non-success status otherwise.
 *
 * Fixed fused "unsignedint" in the signature (compile error).
 */
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
	uint64_t l, enum nexus_wait_type type)
{
	struct qla_qpair *qpair;
	struct qla_hw_data *ha = vha->hw;
	int i, status = QLA_SUCCESS;

	/* Check the base qpair first; bail out of the loop on first failure. */
	status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
	    type);
	for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
		qpair = ha->queue_pair_map[i];
		if (!qpair)
			continue;
		status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
		    type);
	}
	return status;
}
/*
 * Human-readable failure reasons for reset handlers, indexed by the stage
 * at which the reset attempt failed.
 * Fixed fused "staticchar" (compile error).
 */
static char *reset_errors[] = {
	"HBA not online",
	"HBA not ready",
	"Task management failed",
	"Waiting for command completions",
};
/************************************************************************** * qla2xxx_eh_bus_reset * * Description: * The bus reset function will reset the bus and abort any executing * commands. * * Input: * cmd = Linux SCSI command packet of the command that cause the * bus reset. * * Returns: * SUCCESS/FAILURE (defined as macro in scsi.h). *
**************************************************************************/ staticint
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host); int ret = FAILED; unsignedint id;
uint64_t lun; struct qla_hw_data *ha = vha->hw;
/* * No point in issuing another reset if one is active. Also do not * attempt a reset if we are updating flash.
*/ if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) goto eh_host_reset_lock;
if (vha != base_vha) { if (qla2x00_vp_abort_isp(vha)) goto eh_host_reset_lock;
} else { if (IS_P3P_TYPE(vha->hw)) { if (!qla82xx_fcoe_ctx_reset(vha)) { /* Ctx reset success */
ret = SUCCESS; goto eh_host_reset_lock;
} /* fall thru if ctx reset failed */
} if (ha->wq)
flush_workqueue(ha->wq);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (ha->isp_ops->abort_isp(base_vha)) {
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); /* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x802a, "wait for hba online failed.\n"); goto eh_host_reset_lock;
}
}
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
/* Waiting for command to be returned to OS.*/ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
QLA_SUCCESS)
ret = SUCCESS;
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha);
ret = qla2x00_full_login_lip(vha); if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d, "full_login_lip=%d.\n", ret);
}
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha); if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e, "lip_reset failed (%d).\n", ret);
}
/* Issue marker command only when we are going to start the I/O */
vha->marker_needed = 1;
return QLA_SUCCESS;
}
/* * The caller must ensure that no completion interrupts will happen * while this function is in progress.
*/ staticvoid qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, constint res, unsignedlong *flags)
__releases(qp->qp_lock_ptr)
__acquires(qp->qp_lock_ptr)
{
DECLARE_COMPLETION_ONSTACK(comp);
scsi_qla_host_t *vha = qp->vha; struct qla_hw_data *ha = vha->hw; struct scsi_cmnd *cmd = GET_CMD_SP(sp); int rval; bool ret_cmd;
uint32_t ratov_j;
lockdep_assert_held(qp->qp_lock_ptr);
if (qla2x00_chip_is_down(vha)) {
sp->done(sp, res); return;
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags); switch (sp->type) { case SRB_SCSI_CMD: if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
sp->done(sp, res); break; default: if (ret_cmd)
sp->done(sp, res); break;
}
} else {
sp->done(sp, res);
}
}
/* * The caller must ensure that no completion interrupts will happen * while this function is in progress.
*/ staticvoid
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{ int cnt; unsignedlong flags;
srb_t *sp;
scsi_qla_host_t *vha = qp->vha; struct qla_hw_data *ha = vha->hw; struct req_que *req; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_cmd *cmd;
if (!ha->req_q_map) return;
spin_lock_irqsave(qp->qp_lock_ptr, flags);
req = qp->req; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt]; if (sp) { if (qla2x00_chip_is_down(vha)) {
req->outstanding_cmds[cnt] = NULL;
sp->done(sp, res); continue;
}
/* * The caller must ensure that no completion interrupts will happen * while this function is in progress.
*/ void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{ int que; struct qla_hw_data *ha = vha->hw;
/* Continue only if initialization complete. */ if (!ha->base_qpair) return;
__qla2x00_abort_all_cmds(ha->base_qpair, res);
if (!ha->queue_pair_map) return; for (que = 0; que < ha->max_qpairs; que++) { if (!ha->queue_pair_map[que]) continue;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
pci_name(ha->pdev)); goto iospace_error_exit;
} if (!(ha->bars & 1)) goto skip_pio;
/* We only need PIO for Flash operations on ISP2312 v2 chips. */
pio = pci_resource_start(ha->pdev, 0); if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
ql_log_pci(ql_log_warn, ha->pdev, 0x0012, "Invalid pci I/O region size (%s).\n",
pci_name(ha->pdev));
pio = 0;
}
} else {
ql_log_pci(ql_log_warn, ha->pdev, 0x0013, "Region #0 no a PIO resource (%s).\n",
pci_name(ha->pdev));
pio = 0;
}
ha->pio_address = pio;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014, "PIO address=%llu.\n",
(unsignedlonglong)ha->pio_address);
skip_pio: /* Use MMIO operations for all accesses. */ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0015, "Region #1 not an MMIO resource (%s), aborting.\n",
pci_name(ha->pdev)); goto iospace_error_exit;
} if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0016, "Invalid PCI mem region size (%s), aborting.\n",
pci_name(ha->pdev)); goto iospace_error_exit;
}
/* 64bit PCI BAR - BAR2 will correspoond to region 4 */ /* 83XX 26XX always use MQ type access for queues
* - mbar 2, a.k.a region 4 */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
pci_resource_len(ha->pdev, 4));
if (!ha->mqiobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x011d, "BAR2/region4 not enabled\n"); goto mqiobase_exit;
}
ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
pci_resource_len(ha->pdev, 2)); if (ha->msixbase) { /* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1; /* * By default, driver uses at least two msix vectors * (default & rspq)
*/ if (ql2xmqsupport || ql2xnvmeenable) { /* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
/* ATIOQ needs 1 vector. That's 1 less QPair */ if (QLA_TGT_MODE_ENABLED())
ha->max_req_queues--;
ha->max_rsp_queues = ha->max_req_queues;
/* Queue pairs is the max value minus
* the base queue pair */
ha->max_qpairs = ha->max_req_queues - 1;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3, "Max no of queues pairs: %d.\n", ha->max_qpairs);
}
ql_log_pci(ql_log_info, ha->pdev, 0x011c, "MSI-X vector count: %d.\n", ha->msix_count);
} else
ql_log_pci(ql_log_info, ha->pdev, 0x011e, "BAR 1 not enabled.\n");
/*
 * NOTE(review): the text below is a stray German website disclaimer that was
 * accidentally appended to this source file; it is not part of the driver.
 * Translation: "The information on this website has been carefully compiled
 * to the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Remark: the colored
 * syntax highlighting and the measurement are still experimental."
 * This block should be deleted from the file.
 */