/* A relatively short polling period w/o sleeping, to allow the FW to * complete the ramrod and thus possibly to avoid the following sleeps.
*/ if (!skip_quick_poll) {
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false); if (!rc) return 0;
}
/* Move to polling with a sleeping period between iterations */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); if (!rc) return 0;
p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) {
DP_NOTICE(p_hwfn, "ptt, failed to acquire\n"); return -EAGAIN;
}
p_ent->elem.hdr.echo = cpu_to_le16(echo);
elem = qed_chain_produce(p_chain); if (!elem) {
DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); return -EINVAL;
}
*elem = p_ent->elem; /* struct assignment */
/* send a doorbell on the slow hwfn session */
p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
/* make sure the SPQE is updated before the doorbell */
wmb();
/* Need to guarantee the fw_cons index we use points to a usuable * element (to comply with our chain), so our macros would comply
*/ if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
qed_chain_get_usable_per_page(p_chain))
fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
/* Complete current segment of eq entries */ while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) { struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
/* @@@tmp - it's possible we'll eventually want to handle some * actual commands that can arrive here, but for now this is only * used to complete the ramrod using the echo value on the cqe
*/ return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}
/**
 * qed_eth_cqe_completion() - Handle an ETH slow-path RX completion element.
 *
 * @p_hwfn: HW device data.
 * @cqe: The slow-path RX completion element to process.
 *
 * Delegates to the generic CQE completion handler for PROTOCOLID_ETH and
 * logs a notice if the handler fails.
 *
 * Return: zero on success, the error code from qed_cqe_completion() otherwise.
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	/* Propagate the handler's result; the function previously fell off
	 * the end without returning (undefined behavior for non-void).
	 */
	return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken.
 * Returns @p_ent to the SPQ free pool so it can be reused for a
 * subsequent ramrod. No locking is done here by design.
 */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	/* Fix: original had the fused token "staticvoid" (compile error). */
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
/** * qed_spq_add_entry() - Add a new entry to the pending list. * Should be used while lock is being held. * * @p_hwfn: HW device data. * @p_ent: An entry to add. * @priority: Desired priority. * * Adds an entry to the pending list is there is room (an empty * element is available in the free_pool), or else places the * entry in the unlimited_pending pool. * * Return: zero on success, -EINVAL on invalid @priority.
*/ staticint qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, enum spq_priority priority)
{ struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) { if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
/* Copy the ring element physical pointer to the new * entry, since we are about to override the entire ring * entry and don't want to lose the pointer.
*/
p_ent->elem.data_ptr = p_en2->elem.data_ptr;
*p_en2 = *p_ent;
/* EBLOCK responsible to free the allocated p_ent */ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
kfree(p_ent); else
p_ent->post_ent = p_en2;
p_ent = p_en2;
}
}
/* entry is to be placed in 'pending' queue */ switch (priority) { case QED_SPQ_PRIORITY_NORMAL:
list_add_tail(&p_ent->list, &p_spq->pending);
p_spq->normal_count++; break; case QED_SPQ_PRIORITY_HIGH:
list_add(&p_ent->list, &p_spq->pending);
p_spq->high_count++; break; default: return -EINVAL;
}
/* Avoid overriding of SPQ entries when getting out-of-order completions, by * marking the completions in a bitmap and increasing the chain consumer only * for the first successive completed entries.
*/ staticvoid qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; struct qed_spq *p_spq = p_hwfn->p_spq;
if (!p_ent) {
DP_NOTICE(p_hwfn, "Got a NULL pointer\n"); return -EINVAL;
}
if (p_hwfn->cdev->recov_in_prog) {
DP_VERBOSE(p_hwfn,
QED_MSG_SPQ, "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
p_ent->elem.hdr.cmd_id),
p_ent->elem.hdr.cmd_id,
qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
p_ent->elem.hdr.protocol_id);
/* Let the flow complete w/o any error handling */
qed_spq_recov_set_ret_code(p_ent, fw_return_code); return 0;
}
/* Complete the entry */
rc = qed_spq_fill_entry(p_hwfn, p_ent);
spin_lock_bh(&p_spq->lock);
/* Check return value after LOCK is taken for cleaner error flow */ if (rc) goto spq_post_fail;
/* Check if entry is in block mode before qed_spq_add_entry, * which might kfree p_ent.
*/
eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
/* Add the request to the pending queue */
rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); if (rc) goto spq_post_fail;
rc = qed_spq_pend_post(p_hwfn); if (rc) { /* Since it's possible that pending failed for a different * entry [although unlikely], the failed entry was already * dealt with; No need to return it here.
*/
b_ret_ent = false; goto spq_post_fail;
}
spin_unlock_bh(&p_spq->lock);
if (eblock) { /* For entries in QED BLOCK mode, the completion code cannot * perform the necessary cleanup - if it did, we couldn't * access p_ent here to see whether it's successful or not. * Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) { struct qed_spq_entry *p_post_ent = p_ent->post_ent;
kfree(p_ent);
/* Return the entry which was actually posted */
p_ent = p_post_ent;
}
if (rc) goto spq_post_fail2;
/* return to pool */
qed_spq_return_entry(p_hwfn, p_ent);
} return rc;
p_spq = p_hwfn->p_spq; if (!p_spq) return -EINVAL;
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) {
list_del(&p_ent->list);
qed_spq_comp_bmap_update(p_hwfn, echo);
p_spq->comp_count++;
found = p_ent; break;
}
/* This is relatively uncommon - depends on scenarios * which have mutliple per-PF sent ramrods.
*/
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
le16_to_cpu(echo),
le16_to_cpu(p_ent->elem.hdr.echo));
}
/* Release lock before callback, as callback may post * an additional ramrod.
*/
spin_unlock_bh(&p_spq->lock);
if (!found) {
DP_NOTICE(p_hwfn, "Failed to find an entry this EQE [echo %04x] completes\n",
le16_to_cpu(echo)); return -EEXIST;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete EQE [echo %04x]: func %p cookie %p)\n",
le16_to_cpu(echo),
p_ent->comp_cb.function, p_ent->comp_cb.cookie); if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code); else
DP_VERBOSE(p_hwfn,
QED_MSG_SPQ, "Got a completion without a callback function\n");
if (found->comp_mode != QED_SPQ_MODE_EBLOCK) /* EBLOCK is responsible for returning its own entry into the * free list.
*/
qed_spq_return_entry(p_hwfn, found);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.