/* This might occur on certain instances; Log it once then mask it */
DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
tmp);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
0xffffffff);
/* We've already cleared the timeout interrupt register, so we learn * of interrupts via the validity register
*/
tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) goto out;
/* Read the GRC timeout information */
tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
/* Flush any pending (e)dpms as they may never arrive */
qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
/* wait for usage to zero or count to run out. This is necessary since * EDPM doorbell transactions can take multiple 64b cycles, and as such * can "split" over the pci. Possibly, the doorbell drop can happen with * half an EDPM in the queue and other half dropped. Another EDPM * doorbell to the same address (from doorbell recovery mechanism or * from the doorbelling entity) could have first half dropped and second * half interpreted as continuation of the first. To prevent such * malformed doorbells from reaching the device, flush the queue before * releasing the overflow sticky indication.
*/ while (count-- && usage) {
usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
udelay(QED_DB_REC_INTERVAL);
}
/* should have been depleted by now */ if (usage) {
DP_NOTICE(p_hwfn->cdev, "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); return -EBUSY;
}
return 0;
}
/**
 * qed_db_rec_handler() - Doorbell overflow recovery handler.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for register access.
 *
 * Reads the per-PF doorbell overflow sticky indication. When set, flags PF
 * doorbell recovery to run in the next periodic handler, flushes the doorbell
 * queue (unless EDPM is unavailable on this doorbell BAR) and clears the
 * sticky indication. The periodic recovery handler is scheduled whether or
 * not an overflow was detected.
 *
 * Return: Zero on success, negative errno if the queue flush failed.
 */
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 overflow;
	int rc = 0;

	/* Originally the undeclared identifier 'overflow' was used while
	 * 'attn_ovfl'/'cur_ovfl' were declared but never read; declare the
	 * variable actually used and drop the unused ones.
	 */
	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	if (!overflow)
		goto out;

	/* Run PF doorbell recovery in next periodic handler */
	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);

	if (!p_hwfn->db_bar_no_edpm) {
		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc)
			goto out;
	}

	/* Release the overflow sticky indication only after the queue flush */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
out:
	/* Schedule the handler even if overflow was not detected */
	qed_periodic_db_rec_start(p_hwfn);

	/* The original int-returning body fell off the end without a return */
	return rc;
}
int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); if (int_sts == 0xdeadbeaf) {
DP_NOTICE(p_hwfn->cdev, "DORQ is being reset, skipping int_sts handler\n");
return 0;
}
/* int_sts may be zero since all PFs were interrupted for doorbell * overflow but another one already handled it. Can abort here. If * This PF also requires overflow recovery we will be interrupted again. * The masked almost full indication may also be set. Ignoring.
*/ if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) return 0;
DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
/* check if db_drop or overflow happened */ if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { /* Obtain data about db drop/overflow */
first_drop_reason = qed_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_REASON) &
QED_DORQ_ATTENTION_REASON_MASK;
details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
address = qed_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS);
all_drops_reason = qed_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_REASON);
/* Log info */
DP_NOTICE(p_hwfn->cdev, "Doorbell drop occurred\n" "Address\t\t0x%08x\t(second BAR address)\n" "FID\t\t0x%04x\t\t(Opaque FID)\n" "Size\t\t0x%04x\t\t(in bytes)\n" "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
address,
GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
first_drop_reason, all_drops_reason);
/* Clear the doorbell drop details and prepare for next drop */
qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
/* Mark interrupt as handled (note: even if drop was due to a different * reason than overflow we mark as handled)
*/
qed_wr(p_hwfn,
p_ptt,
DORQ_REG_INT_STS_WR,
DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
/* If there are no indications other than drop indications, success */ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) return 0;
}
/* Some other indication was present - non recoverable */
DP_INFO(p_hwfn, "DORQ fatal attention\n");
return -EINVAL;
}
/* DORQ (doorbell queue block) attention callback; bails out early (benign,
 * return 0) when a device recovery is already in progress.
 *
 * NOTE(review): this definition appears truncated in this chunk —
 * "staticint" is missing the space between "static" and "int", and the
 * function body after the recovery-in-progress guard, along with its closing
 * brace, is not visible here. Recover the full definition from the original
 * file before modifying it.
 */
staticint qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{ if (p_hwfn->cdev->recov_in_prog) return 0;
/* Invoke the DORQ attention callback if the attention was not already
 * handled (dorq_attn flag clear, i.e. the attention was missed), then reset
 * the flag for the next attention cycle.
 *
 * Fixes the fused "staticvoid" token from the original, which would not
 * compile.
 */
static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->db_recovery_info.dorq_attn)
		goto out;

	/* Call DORQ callback if the attention was missed */
	qed_dorq_attn_cb(p_hwfn);
out:
	p_hwfn->db_recovery_info.dorq_attn = false;
}
/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,	/* number of special sources */
};
staticstruct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
};
/* NOTE(review): this array definition appears truncated in this chunk —
 * "staticstruct" is missing a space, and only the first "After Invert 1"
 * entry is visible; the remaining entries (the array is sized
 * NUM_ATTN_REGS) and the closing "};" are not shown here. Restore the full
 * definition from the original file before editing.
 */
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ staticstruct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 1 */
{"GPIO0 function%d",
(32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
}
},
status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
b_clear, &attn_results); if (status != DBG_STATUS_OK)
DP_NOTICE(p_hwfn, "Failed to parse attention information [status: %s]\n",
qed_dbg_get_status_str(status)); else
qed_dbg_parse_attn(p_hwfn, &attn_results);
}
/**
 * qed_int_deassertion_aeu_bit() - Handles the effects of a single
 * cause of the attention.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the attention.
 * @aeu_en_reg: Register offset of the AEU enable reg. which configured
 *              this bit to this group.
 * @p_bit_name: AEU bit description for logging purposes.
 * @bitmask: Index of this bit in the aeu_en_reg.
 *
 * Return: Zero on success, negative errno otherwise.
 *
 * NOTE(review): "staticint" / "constchar" below are whitespace-mangled
 * ("static int" / "const char") and will not compile as-is; this block's
 * interior also looks truncated — see the note at b_fatal below. Recover the
 * full definition from the original file before changing control flow.
 */
staticint
qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_aeu,
u32 aeu_en_reg, constchar *p_bit_name, u32 bitmask)
{ bool b_fatal = false; int rc = -EINVAL;
u32 val;
/* NOTE(review): b_fatal is never set between its initialization and the
 * test below, and rc is never updated from -EINVAL — the code that normally
 * invokes the per-bit attention callback and computes both appears to be
 * missing from this chunk. As written, the fatal branch is dead and the
 * benign path always jumps to out returning -EINVAL. Confirm against the
 * full file.
 */
/* Reach assertion if attention is fatal */ if (b_fatal)
qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN, "`%s': Fatal attention\n",
p_bit_name); else/* If the attention is benign, no need to prevent it */ goto out;
/* Prevent this Attention from being asserted in the future */
val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
p_bit_name);
/* Re-enable FW assertion (Gen 32) interrupts */
val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
MISC_REG_AEU_ENABLE4_IGU_OUT_0);
val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32;
qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
MISC_REG_AEU_ENABLE4_IGU_OUT_0, val);
out: return rc;
}
/**
 * qed_int_deassertion_parity() - Handle a single parity AEU source.
 *
 * @p_hwfn: HW device data.
 * @p_aeu: Descriptor of an AEU bit which caused the parity.
 * @aeu_en_reg: Address of the AEU enable register.
 * @bit_index: Index (0-31) of an AEU bit.
 *
 * Logs the parity attention, dumps the attention data of the affected
 * block(s), then masks the bit in the AEU enable register so the same
 * parity error is not re-asserted.
 *
 * Fixes the fused "staticvoid" token from the original, which would not
 * compile.
 */
static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
				       struct aeu_invert_reg_bit *p_aeu,
				       u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->cdev, "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In BB, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
					   ATTN_TYPE_PARITY, false);
			qed_int_attn_print(p_hwfn, BLOCK_MCP,
					   ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~BIT(bit_index);
	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
/* Read the attention registers in the AEU */ for (i = 0; i < NUM_ATTN_REGS; i++) {
aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
MISC_REG_AEU_AFTER_INVERT_1_IGU +
i * 0x4);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "Deasserted bits [%d]: %08x\n",
i, aeu_inv_arr[i]);
}
/* Find parity attentions first */ for (i = 0; i < NUM_ATTN_REGS; i++) { struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
u32 parities;
aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
/* Skip register in which no parity bit is currently set */
parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; if (!parities) continue;
/* Find non-parity cause for attention and act */ for (k = 0; k < MAX_ATTN_GRPS; k++) { struct aeu_invert_reg_bit *p_aeu;
/* Handle only groups whose attention is currently deasserted */ if (!(deasserted_bits & (1 << k))) continue;
for (i = 0; i < NUM_ATTN_REGS; i++) {
u32 bits;
aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
i * sizeof(u32) +
k * sizeof(u32) * NUM_ATTN_REGS;
en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
bits = aeu_inv_arr[i] & en;
/* Skip if no bit from this group is currently set */ if (!bits) continue;
/* Find all set bits from current register which belong * to current group, making them responsible for the * previous assertion.
*/ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { longunsignedint bitmask;
u8 bit, bit_len;
/* Read current attention bits/acks - safeguard against attentions * by guaranting work on a synchronized timeframe
*/ do {
index = p_sb_attn->sb_index; /* finish reading index before the loop condition */
dma_rmb();
attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
} while (index != p_sb_attn->sb_index);
p_sb_attn->sb_index = index;
/* Attention / Deassertion are meaningful (and in correct state) * only when they differ and consistent with known state - deassertion * when previous attention & current ack, and assertion when current * attention with no previous attention
*/
asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
~p_sb_attn_sw->known_attn;
deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
p_sb_attn_sw->known_attn;
/* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW.
*/
barrier();
}
/* Disable ack for def status block. Required both for msix + * inta in non-mask mode, in inta does no harm.
*/
qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
/* Gather Interrupts/Attentions information */ if (!sb_info->sb_virt) {
DP_ERR(p_hwfn->cdev, "Interrupt Status block is NULL - cannot check for new interrupts!\n");
} else {
u32 tmp_index = sb_info->sb_ack;
if (!sb_attn || !sb_attn->sb_attn) {
DP_ERR(p_hwfn->cdev, "Attentions Status block is NULL - cannot check for new attentions!\n");
} else {
u16 tmp_index = sb_attn->index;
/* Check if we expect interrupts at this time. if not just ack them */ if (!(rc & QED_SB_EVENT_MASK)) {
qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); return;
}
/* Check the validity of the DPC ptt. If not ack interrupts and fail */ if (!p_hwfn->p_dpc_ptt) {
DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); return;
}
if (rc & QED_SB_ATT_IDX)
qed_int_attentions(p_hwfn);
if (rc & QED_SB_IDX) { int pi;
/* Look for a free index */ for (pi = 0; pi < arr_size; pi++) {
pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; if (pi_info->comp_cb)
pi_info->comp_cb(p_hwfn, pi_info->cookie);
}
}
if (sb_attn && (rc & QED_SB_ATT_IDX)) /* This should be done before the interrupts are enabled, * since otherwise a new attention will be generated.
*/
qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
/* Set the pointer to the AEU descriptors */
sb_info->p_aeu_desc = aeu_descs;
/* Calculate Parity Masks */
memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); for (i = 0; i < NUM_ATTN_REGS; i++) { /* j is array index, k is bit index */ for (j = 0, k = 0; k < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_aeu;
/* The igu address will hold the absolute address that needs to be * written to for a specific status block
*/ if (IS_PF(p_hwfn->cdev)) {
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
(sb_info->igu_sb_id << 3);
} else {
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_IGU +
((IGU_CMD_INT_ACK_BASE +
sb_info->igu_sb_id) << 3);
}
/* Set the data field */
SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
/* Set the control register */
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
if (!RESC_NUM(p_hwfn, QED_SB)) {
p_info->b_allow_pf_vf_change = false;
} else { /* Use the numbers the MFW have provided - * don't forget MFW accounts for the default SB as well.
*/
p_info->b_allow_pf_vf_change = true;
if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
DP_INFO(p_hwfn, "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
RESC_NUM(p_hwfn, QED_SB) - 1,
p_info->usage.cnt);
p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
}
if (IS_PF_SRIOV(p_hwfn)) {
u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
if (vfs != p_info->usage.iov_cnt)
DP_VERBOSE(p_hwfn,
NETIF_MSG_INTR, "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
p_info->usage.iov_cnt, vfs);
/* At this point we know how many SBs we have totally * in IGU + number of PF SBs. So we can validate that * we'd have sufficient for VF.
*/ if (vfs > p_info->usage.free_cnt +
p_info->usage.free_cnt_iov - p_info->usage.cnt) {
DP_NOTICE(p_hwfn, "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
p_info->usage.free_cnt +
p_info->usage.free_cnt_iov,
p_info->usage.cnt, vfs); return -EINVAL;
}
/* Currently cap the number of VFs SBs by the * number of VFs.
*/
p_info->usage.iov_cnt = vfs;
}
}
/* Mark all SBs as free, now in the right PF/VFs division */
p_info->usage.free_cnt = p_info->usage.cnt;
p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
p_info->usage.orig = p_info->usage.cnt;
p_info->usage.iov_orig = p_info->usage.iov_cnt;
/* We now proceed to re-configure the IGU cam to reflect the initial * configuration. We can start with the Default SB.
*/
pf_sbs = p_info->usage.cnt;
vf_sbs = p_info->usage.iov_cnt;
for (igu_sb_id = p_info->igu_dsb_id;
igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
p_block = &p_info->entry[igu_sb_id];
val = 0;
if (!(p_block->status & QED_IGU_STATUS_VALID)) continue;
for (igu_sb_id = 0;
igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { /* Read current entry; Notice it might not belong to this PF */
qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
p_block = &p_igu_info->entry[igu_sb_id];
if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
p_igu_info->usage.cnt++;
} elseif (!(p_block->is_pf) &&
(p_block->function_id >= min_vf) &&
(p_block->function_id < max_vf)) { /* Available for VFs of this PF */
p_block->status = QED_IGU_STATUS_VALID |
QED_IGU_STATUS_FREE;
if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
p_igu_info->usage.iov_cnt++;
}
/* Mark the First entry belonging to the PF or its VFs * as the default SB [we'll reset IGU prior to first usage].
*/ if ((p_block->status & QED_IGU_STATUS_VALID) &&
(p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
p_igu_info->igu_dsb_id = igu_sb_id;
p_block->status |= QED_IGU_STATUS_DSB;
}
/* limit number of prints by having each PF print only its * entries with the exception of PF0 which would print * everything.
*/ if ((p_block->status & QED_IGU_STATUS_VALID) ||
(p_hwfn->abs_pf_id == 0)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.24 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.