// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */
for (i = 0; i < hw->config.n_eq; i++) { conststruct cpumask *maskp;
/* Get a CPU mask for all CPUs affinitized to this vector */
maskp = pci_irq_get_affinity(efct->pci, i); if (!maskp) {
efc_log_debug(efct, "maskp null for vector:%d\n", i); continue;
}
/* Loop through all CPUs associated with vector idx */
for_each_cpu_and(cpu, maskp, cpu_present_mask) {
efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
hw->wq_cpu_array[cpu] = hw->hw_wq[i];
}
}
/* Initialise RQS pointers */ for (i = 0; i < num_rq_pairs; i++)
rqs[i] = NULL;
/* * Allocate an RQ object SET, where each element in set * encapsulates 2 SLI queues (for rq pair)
*/ for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
rq = kzalloc(sizeof(*rq), GFP_KERNEL); if (!rq) goto error;
if (bufindex >= rq_hdr->length) {
efc_log_err(hw->os, "RQidx %d bufidx %d exceed ring len %d for id %d\n",
rqindex, bufindex, rq_hdr->length, rq_hdr->id); return NULL;
}
/* rq_hdr lock also covers rqindex+1 queue */
spin_lock_irqsave(&rq_hdr->lock, flags);
int
efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
u8 *cqe)
{
u16 rq_id;
u32 index; int rqindex; int rq_status;
u32 h_len;
u32 p_len; struct efc_hw_sequence *seq; struct hw_rq *rq;
rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
&rq_id, &index); if (rq_status != 0) { switch (rq_status) { case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED: case SLI4_FC_ASYNC_RQ_DMA_FAILURE: /* just get RQ buffer then return to chip */
rqindex = efct_hw_rqpair_find(hw, rq_id); if (rqindex < 0) {
efc_log_debug(hw->os, "status=%#x: lookup fail id=%#x\n",
rq_status, rq_id); break;
}
/* get RQ buffer */
seq = efct_hw_rqpair_get(hw, rqindex, index);
/* return to chip */ if (efct_hw_rqpair_sequence_free(hw, seq)) {
efc_log_debug(hw->os, "status=%#x,fail rtrn buf to RQ\n",
rq_status); break;
} break; case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED: case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC: /* * since RQ buffers were not consumed, cannot return * them to chip
*/
efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
rq_status);
fallthrough; default: break;
} return -EIO;
}
rqindex = efct_hw_rqpair_find(hw, rq_id); if (rqindex < 0) {
efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
rq_id); return -EIO;
}
/* * Note: The header must be posted last for buffer pair mode because * posting on the header queue posts the payload queue as well. * We do not ring the payload queue independently in RQ pair mode.
*/
qindex_payload = sli_rq_write(&hw->sli, rq_payload,
(void *)phys_payload);
qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr); if (qindex_hdr < 0 ||
qindex_payload < 0) {
efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
spin_unlock_irqrestore(&rq_hdr->lock, flags); return -EIO;
}
/* ensure the indexes are the same */
WARN_ON(qindex_hdr != qindex_payload);
/* Update the lookup table */ if (!rq->rq_tracker[qindex_hdr]) {
rq->rq_tracker[qindex_hdr] = seq;
} else {
efc_log_debug(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
hw_rq_index, qindex_hdr);
}
int
efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
{ int rc = 0;
/* * Post the data buffer first. Because in RQ pair mode, ringing the * doorbell of the header ring will post the data buffer as well.
*/ if (efct_hw_rqpair_put(hw, seq)) {
efc_log_err(hw->os, "error writing buffers\n"); return -EIO;
}
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.