/* * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI * Host Bus Adapters. Refer to the README file included with this package * for driver version and adapter compatibility. * * Copyright (c) 2018 Broadcom. All Rights Reserved. * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. ALL EXPRESS * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. * See the GNU General Public License for more details, a copy of which * can be found in the file COPYING included with this package. * * Contact Information: * linux-drivers@broadcom.com *
*/
/*
 * NOTE(review): fragment of an iSCSI error-handling (abort/device-reset)
 * path; the enclosing function header is not visible in this chunk, so
 * abrt_task, session, inv_tbl, sc, nents, more, i etc. are declared
 * outside this view.
 */
completion_check: /* check if we raced, task just got cleaned up under us */
spin_lock_bh(&session->back_lock); if (!abrt_task || !abrt_task->sc) {
spin_unlock_bh(&session->back_lock); return SUCCESS;
} /* get a task ref till FW processes the req for the ICD used */ if (!iscsi_get_task(abrt_task)) {
/* NOTE(review): lock was taken with spin_lock_bh above but is dropped
 * here with plain spin_unlock - BH state looks unbalanced; confirm
 * against the original source. */
spin_unlock(&session->back_lock); /* We are just about to call iscsi_free_task so wait for it. */
udelay(5); goto completion_check;
}
abrt_io_task = abrt_task->dd_data;
conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
phba = beiscsi_conn->phba; /* mark WRB invalid which have been not processed by FW yet */ if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
abrt_io_task->pwrb_handle->pwrb, 1);
} else {
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
abrt_io_task->pwrb_handle->pwrb, 1);
}
/* record the connection id and ICD of the command being aborted */
inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
spin_unlock_bh(&session->back_lock);
/* NOTE(review): inv_tbl is used as a plain struct just above but as a
 * pointer (kzalloc target) here - this reads like two different EH
 * routines spliced together; reconcile against the original driver. */
inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC); if (!inv_tbl) {
spin_unlock_bh(&session->frwd_lock);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, "BM_%d : invldt_cmd_tbl alloc failed\n"); return FAILED;
}
nents = 0; /* take back_lock to prevent task from getting cleaned up under us */
spin_lock(&session->back_lock); for (i = 0; i < conn->session->cmds_max; i++) {
task = conn->session->cmds[i]; if (!task->sc) continue;
/* only collect commands on the same LUN as the reset target */
if (sc->device->lun != task->sc->device->lun) continue; /** * Can't fit in more cmds? Normally this won't happen b'coz * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ.
*/ if (nents == BE_INVLDT_CMD_TBL_SZ) {
more = 1; break;
}
/* get a task ref till FW processes the req for the ICD used */ if (!iscsi_get_task(task)) { /* * The task has completed in the driver and is * completing in libiscsi. Just ignore it here. When we * call iscsi_eh_device_reset, it will wait for us.
*/ continue;
}
io_task = task->dd_data; /* mark WRB invalid which have been not processed by FW yet */ if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
io_task->pwrb_handle->pwrb, 1);
} else {
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
io_task->pwrb_handle->pwrb, 1);
}
/**
 * beiscsi_enable_pci - enable the PCI device and claim its resources
 * @pcidev: PCI device to bring up
 *
 * Enables the device, requests its BAR regions, enables bus mastering
 * and configures a 64-bit DMA mask, falling back to 32-bit when the
 * platform cannot do 64-bit DMA.
 *
 * Return: 0 on success, negative errno on failure.  On failure every
 * resource acquired so far is released again (goto-unwind).
 */
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);

	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret) {
		/* fall back to a 32-bit DMA mask on older platforms */
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

	/*
	 * Error unwind: these labels were referenced by the gotos above but
	 * missing from the truncated original - without them the function
	 * does not compile and leaks the regions/device on failure.
	 */
pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}
/*
 * NOTE(review): fragment - aligns the firmware-reported ICD (I/O Context
 * Descriptor) range for one ULP to whole-page posting boundaries.  The
 * enclosing function header and local declarations are outside this
 * chunk.
 */
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
/* Get ICD count that can be posted on each page */
icd_post_per_page = (PAGE_SIZE / (BE2_SGE * sizeof(struct iscsi_sge)));
align_mask = (icd_post_per_page - 1);
/* Check if icd_start is aligned ICD per page posting */ if (icd_start % icd_post_per_page) {
/* round icd_start up to the next per-page posting boundary */
icd_start_align = ((icd_start +
icd_post_per_page) &
~(align_mask));
phba->fw_config.
iscsi_icd_start[ulp_num] =
icd_start_align;
}
/* round the usable count down to a whole number of page postings */
icd_count_align = (icd_count & ~align_mask);
/* ICD discarded in the process of alignment */ if (icd_start_align)
icd_count_unavailable = ((icd_start_align -
icd_start) +
(icd_count -
icd_count_align));
/*
 * NOTE(review): tail of an interrupt handler (io_events/mcc_events are
 * accumulated before this fragment begins; the function header is not
 * visible in this chunk).
 */
/* no need to rearm if interrupt is only for IOs */
rearm = 0; if (mcc_events) {
queue_work(phba->wq, &pbe_eq->mcc_work); /* rearm for MCCQ */
rearm = 1;
} if (io_events)
irq_poll_sched(&pbe_eq->iopoll);
/* ack the consumed EQ entries and optionally re-arm the EQ doorbell */
hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1); return IRQ_HANDLED;
}
/**
 * beiscsi_free_irqs - release every IRQ registered by the driver
 * @phba: adapter instance
 *
 * MSI-X mode registers one vector per EQ - num_cpus I/O vectors plus
 * one MCC vector, hence the "<=" bound - each with a kasprintf'd name
 * string that must be freed alongside the IRQ.  In legacy INTx mode
 * only the single shared line is released.
 *
 * (Fixed "staticvoid" fused-keyword typo that broke compilation.)
 */
static void beiscsi_free_irqs(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	int i;

	if (!phba->pcidev->msix_enabled) {
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
		return;
	}

	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
	for (i = 0; i <= phba->num_cpus; i++) {
		free_irq(pci_irq_vector(phba->pcidev, i),
			 &phwi_context->be_eq[i]);
		kfree(phba->msi_name[i]);
	}
}
staticint beiscsi_init_irqs(struct beiscsi_hba *phba)
{ struct pci_dev *pcidev = phba->pcidev; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; int ret, i, j;
if (pcidev->msix_enabled) { for (i = 0; i < phba->num_cpus; i++) {
phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_%02x_%02x",
phba->shost->host_no, i); if (!phba->msi_name[i]) {
ret = -ENOMEM; goto free_msix_irqs;
}
ret = request_irq(pci_irq_vector(pcidev, i),
be_isr_msix, 0, phba->msi_name[i],
&phwi_context->be_eq[i]); if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : %s-Failed to register msix for i = %d\n",
__func__, i);
kfree(phba->msi_name[i]); goto free_msix_irqs;
}
}
phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
phba->shost->host_no); if (!phba->msi_name[i]) {
ret = -ENOMEM; goto free_msix_irqs;
}
ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
phba->msi_name[i], &phwi_context->be_eq[i]); if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
__func__);
kfree(phba->msi_name[i]); goto free_msix_irqs;
}
} else {
ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba); if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : %s-Failed to register irq\n",
__func__); return ret;
}
} return 0;
free_msix_irqs: for (j = i - 1; j >= 0; j--) {
free_irq(pci_irq_vector(pcidev, i), &phwi_context->be_eq[j]);
kfree(phba->msi_name[j]);
} return ret;
}
/*
 * NOTE(review): interior of an io-SGL handle free routine; the function
 * header (and psgl_handle/flags declarations) are outside this chunk.
 * The log string's "free_," oddity is a runtime string and left as-is.
 */
spin_lock_irqsave(&phba->io_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : In free_,io_sgl_free_index=%d\n",
phba->io_sgl_free_index);
if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { /* * this can happen if clean_task is called on a task that * failed in xmit_task or alloc_pdu.
*/
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
phba->io_sgl_free_index,
phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
spin_unlock_irqrestore(&phba->io_sgl_lock, flags); return;
}
/* return the handle to the pool and advance the circular free index */
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
phba->io_sgl_hndl_avbl++; if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
phba->io_sgl_free_index = 0; else
phba->io_sgl_free_index++;
spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}
/*
 * NOTE(review): these two fragments appear out of order - first the tail
 * of a WRB-handle allocator, then the head of alloc_wrb_handle whose
 * body is truncated below.  "unsignedint" in the signature is missing a
 * space and will not compile; fix when the full function is restored.
 */
if (pwrb_handle)
memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
return pwrb_handle;
}
/** * alloc_wrb_handle - To allocate a wrb handle * @phba: The hba pointer * @cid: The cid to use for allocation * @pcontext: ptr to ptr to wrb context * * This happens under session_lock until submission to chip
*/ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsignedint cid, struct hwi_wrb_context **pcontext)
{ struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip.
 * Returns the handle via beiscsi_put_wrb_handle and logs the new
 * free_index / availability count for debugging.
 *
 * (Fixed "staticvoid" fused-keyword typo that broke compilation.)
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x " "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}
/*
 * NOTE(review): interior of an eh (mgmt) SGL handle free routine; the
 * function header is outside this chunk - mgmt_sgl_lock is presumably
 * already held with irqsave on entry (the unlocks below restore flags).
 */
if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { /* * this can happen if clean_task is called on a task that * failed in xmit_task or alloc_pdu.
*/
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BM_%d : Double Free in eh SGL ," "eh_sgl_free_index=%d\n",
phba->eh_sgl_free_index);
spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); return;
}
/* return the handle and advance the circular free index over the
 * eh portion of the SGL pool (icds_per_ctrl - ios_per_ctrl entries) */
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
phba->eh_sgl_hndl_avbl++; if (phba->eh_sgl_free_index ==
(phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
phba->eh_sgl_free_index = 0; else
phba->eh_sgl_free_index++;
spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
/*
 * NOTE(review): interior of the DEF PDU CQ-entry to async_handle lookup
 * (beiscsi_hdl_get_handle-style); the function header is not visible in
 * this chunk.
 */
cid = beiscsi_conn->beiscsi_conn_cid;
cri = BE_GET_ASYNC_CRI_FROM_CID(cid); /** * This function is invoked to get the right async_handle structure * from a given DEF PDU CQ entry. * * - index in CQ entry gives the vertical index * - address in CQ entry is the offset where the DMA last ended * - final - no more notifications for this PDU
*/ if (is_chip_be2_be3r(phba)) {
dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
dpl, pdpdu_cqe);
ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
index, pdpdu_cqe);
final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
final, pdpdu_cqe);
} else {
/* SKH and later use the v2 CQE layout for the same fields */
dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
dpl, pdpdu_cqe);
ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
index, pdpdu_cqe);
final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
final, pdpdu_cqe);
}
/** * DB addr Hi/Lo is same for BE and SKH. * Subtract the dataplacementlength to get to the base.
*/
phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
db_addr_lo, pdpdu_cqe);
phys_addr.u.a32.address_lo -= dpl;
phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
db_addr_hi, pdpdu_cqe);
/* pick the header or data handle for the vertical index ci */
code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe); switch (code) { case UNSOL_HDR_NOTIFY:
pasync_handle = pasync_ctx->async_entry[ci].header;
*header = 1; break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
error = 1;
fallthrough; case UNSOL_DATA_NOTIFY:
pasync_handle = pasync_ctx->async_entry[ci].data; break; /* called only for above codes */ default: return NULL;
}
if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
pasync_handle->index != ci) { /* driver bug - if ci does not match async handle index */
error = 1;
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
cid, pasync_handle->is_header ? 'H' : 'D',
pasync_handle->pa.u.a64.address,
pasync_handle->index,
phys_addr.u.a64.address, ci); /* FW has stale address - attempt continuing by dropping */
}
/** * DEF PDU header and data buffers with errors should be simply * dropped as there are no consumers for it.
*/ if (error) {
beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); return NULL;
}
if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
cid, code, ci, phys_addr.u.a64.address);
beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
}
list_del_init(&pasync_handle->link); /** * Each CID is associated with unique CRI. * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different.
**/
pasync_handle->cri = cri;
pasync_handle->is_final = final;
pasync_handle->buffer_len = dpl;
pasync_handle->in_use = 1;
/*
 * NOTE(review): fragment that posts nbuf async handles (header or data)
 * back onto the DEF PDU ring starting at producer index pi; the function
 * header is outside this chunk.
 */
for (prod = 0; prod < nbuf; prod++) { if (header)
pasync_handle = pasync_ctx->async_entry[pi].header; else
pasync_handle = pasync_ctx->async_entry[pi].data;
WARN_ON(pasync_handle->is_header != header);
WARN_ON(pasync_handle->index != pi); /* setup the ring only once */ if (nbuf == pasync_ctx->num_entries) { /* note hi is lo */
pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
} if (++pi == pasync_ctx->num_entries)
pi = 0;
}
/* remember the new producer index for the next posting */
if (header)
pasync_ctx->async_header.pi = pi; else
pasync_ctx->async_data.pi = pi;
/*
 * NOTE(review): interior of the CQ-processing loop (beiscsi_process_cq
 * style) - dispatches each completion code; the loop header, the sol/cid
 * extraction and the function header are outside this chunk.
 */
cri_index = BE_GET_CRI_FROM_CID(cid);
ep = phba->ep_array[cri_index];
if (ep == NULL) { /* connection has already been freed * just move on to next one
*/
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_INIT, "BM_%d : proc cqe of disconn ep: cid %d\n",
cid); goto proc_next_cqe;
}
switch (code) { case SOL_CMD_COMPLETE:
hwi_complete_cmd(beiscsi_conn, phba, sol); break; case DRIVERMSG_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); break; case UNSOL_HDR_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
spin_lock_bh(&phba->async_pdu_lock);
beiscsi_hdq_process_compl(beiscsi_conn,
(struct i_t_dpdu_cqe *)sol);
spin_unlock_bh(&phba->async_pdu_lock); break; case UNSOL_DATA_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
spin_lock_bh(&phba->async_pdu_lock);
beiscsi_hdq_process_compl(beiscsi_conn,
(struct i_t_dpdu_cqe *)sol);
spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_INVALIDATE_INDEX_NOTIFY: case CMD_INVALIDATED_NOTIFY: case CXN_INVALIDATE_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Ignoring %s[%d] on CID : %d\n",
cqe_desc[code], code, cid); break; case CXN_KILLED_HDR_DIGEST_ERR: case SOL_CMD_KILLED_DATA_DIGEST_ERR:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
cqe_desc[code], code, cid); break; case CMD_KILLED_INVALID_STATSN_RCVD: case CMD_KILLED_INVALID_R2T_RCVD: case CMD_CXN_KILLED_LUN_INVALID: case CMD_CXN_KILLED_ICD_INVALID: case CMD_CXN_KILLED_ITT_INVALID: case CMD_CXN_KILLED_SEQ_OUTOFORDER: case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
cqe_desc[code], code, cid); break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
cqe_desc[code], code, cid);
spin_lock_bh(&phba->async_pdu_lock); /* driver consumes the entry and drops the contents */
beiscsi_hdq_process_compl(beiscsi_conn,
(struct i_t_dpdu_cqe *)sol);
spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: case CXN_KILLED_BURST_LEN_MISMATCH: case CXN_KILLED_AHS_RCVD: case CXN_KILLED_UNKNOWN_HDR: case CXN_KILLED_STALE_ITT_TTT_RCVD: case CXN_KILLED_INVALID_ITT_TTT_RCVD: case CXN_KILLED_TIMED_OUT: case CXN_KILLED_FIN_RCVD: case CXN_KILLED_RST_SENT: case CXN_KILLED_RST_RCVD: case CXN_KILLED_BAD_UNSOL_PDU_RCVD: case CXN_KILLED_BAD_WRB_INDEX_ERROR: case CXN_KILLED_OVER_RUN_RESIDUAL: case CXN_KILLED_UNDER_RUN_RESIDUAL: case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Event %s[%d] received on CID : %d\n",
cqe_desc[code], code, cid); if (beiscsi_conn)
iscsi_conn_failure(beiscsi_conn->conn,
ISCSI_ERR_CONN_FAILED); break; default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
code, cid); break;
}
proc_next_cqe:
/* consume the entry: clear valid, advance tail, fetch next CQE */
AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
queue_tail_inc(cq);
sol = queue_tail_node(cq);
/* NOTE(review): loop counts num_processed but tests total == budget;
 * confirm total is advanced elsewhere in the (unseen) loop body. */
num_processed++; if (total == budget) break;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.