// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
* Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_dbg_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
/* Memory groups enum.
 * NOTE: the position of every entry must match the corresponding string in
 * s_mem_group_names below - update both together.
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_CAU_MEM_EXT,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUPS_NUM		/* must be last */
};
/* Memory groups names.
 * Indexed by enum mem_groups; the order here must stay in sync with it.
 */
static const char * const s_mem_group_names[] = {
	"PXP_MEM" ,
	"DMAE_MEM" ,
	"CM_MEM" ,
	"QM_MEM" ,
	"DORQ_MEM" ,
	"BRB_RAM" ,
	"BRB_MEM" ,
	"PRS_MEM" ,
	"SDM_MEM" ,
	"PBUF" ,
	"IOR" ,
	"RAM" ,
	"BTB_RAM" ,
	"RDIF_CTX" ,
	"TDIF_CTX" ,
	"CFC_MEM" ,
	"CONN_CFC_MEM" ,
	"CAU_PI" ,
	"CAU_MEM" ,
	"CAU_MEM_EXT" ,
	"PXP_ILT" ,
	"MULD_MEM" ,
	"BTB_MEM" ,
	"IGU_MEM" ,
	"IGU_MSIX" ,
	"CAU_SB" ,
	"BMB_RAM" ,
	"BMB_MEM" ,
	"TM_MEM" ,
	"TASK_CFC_MEM" ,
};
/* Idle check conditions */
/* True when both masked register dwords differ from their expected values */
static u32 cond5(const u32 *r, const u32 *imm)
{
	u32 mismatch0 = (r[0] & imm[0]) != imm[1];
	u32 mismatch1 = (r[1] & imm[2]) != imm[3];

	return mismatch0 && mismatch1;
}
/* True when the shifted-and-masked field differs from the expected value */
static u32 cond7(const u32 *r, const u32 *imm)
{
	u32 field = (r[0] >> imm[0]) & imm[1];

	return field != imm[2];
}
/* True when the masked register differs from the expected value */
static u32 cond6(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked != imm[1];
}
/* True when a field of r[0] differs from a value assembled from a low part
 * of r[0] and a high part of r[1].
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	u32 lhs = (r[0] & imm[0]) >> imm[1];
	u32 rhs_lo = (r[0] & imm[2]) >> imm[3];
	u32 rhs_hi = (r[1] & imm[4]) << imm[5];

	return lhs != (rhs_lo | rhs_hi);
}
/* True when a shifted field of r[0] differs from another masked view of it */
static u32 cond10(const u32 *r, const u32 *imm)
{
	u32 extracted = (r[0] & imm[0]) >> imm[1];

	return extracted != (r[0] & imm[2]);
}
/* True when the bits of r[0] outside imm[0] differ from the expected value */
static u32 cond4(const u32 *r, const u32 *imm)
{
	u32 outside_mask = r[0] & ~imm[0];

	return outside_mask != imm[1];
}
/* True when the bits of r[0] not set in r[1] differ from the expected value */
static u32 cond0(const u32 *r, const u32 *imm)
{
	u32 unmasked = r[0] & ~r[1];

	return unmasked != imm[0];
}
/* True when r[0] OR-ed with imm[0] differs from the expected value */
static u32 cond14(const u32 *r, const u32 *imm)
{
	u32 combined = r[0] | imm[0];

	return combined != imm[1];
}
/* True when the register differs from the expected value */
static u32 cond1(const u32 *r, const u32 *imm)
{
	return (r[0] == imm[0]) ? 0 : 1;
}
/* True when the first two registers differ and the third equals imm[0] */
static u32 cond11(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] == imm[0];
}
/* True when the first two registers differ and the third exceeds imm[0] */
static u32 cond12(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] > imm[0];
}
/* True when the two registers differ (imm is unused for this condition) */
static u32 cond3(const u32 *r, const u32 *imm)
{
	return (r[0] == r[1]) ? 0 : 1;
}
/* Returns the masked register value itself (any non-zero bit means the
 * condition holds) - note this is NOT normalized to 0/1.
 */
static u32 cond13(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked;
}
/* True when r[0] is below r[1] reduced by imm[0] (unsigned arithmetic) */
static u32 cond8(const u32 *r, const u32 *imm)
{
	u32 threshold = r[1] - imm[0];

	return r[0] < threshold;
}
/* True when the register exceeds the immediate value */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return (r[0] <= imm[0]) ? 0 : 1;
}
/* Array of Idle Check conditions.
 * The index of each entry is the condition ID referenced by the idle-check
 * rule data, so cond0 must be at index 0, cond1 at index 1, etc. - hence
 * the non-sequential definition order of the functions above.
 */
static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
	cond14,
};
#define NUM_PHYS_BLOCKS 84

#define NUM_DBG_RESET_REGS 8

/******************************* Data Types **********************************/

/* Platform/HW types. Only the ASIC entry is populated in s_hw_type_defs;
 * the remaining slots are reserved placeholders.
 */
enum hw_types {
	HW_TYPE_ASIC,
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	PLATFORM_RESERVED4,
	MAX_HW_TYPES
};

/* CM context types - order matches the cm_ctx_rd_addr/cm_ctx_lid_sizes
 * arrays in struct storm_defs.
 */
enum cm_ctx_types {
	CM_CTX_CONN_AG,
	CM_CTX_CONN_ST,
	CM_CTX_TASK_AG,
	CM_CTX_TASK_ST,
	NUM_CM_CTX_TYPES
};

/* Debug bus frame modes */
enum dbg_bus_frame_modes {
	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dwords, 3 HW dwords */
	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
	DBG_BUS_NUM_FRAME_MODES
};

/* Debug bus SEMI frame modes */
enum dbg_bus_semi_frame_modes {
	DBG_BUS_SEMI_FRAME_MODE_4FAST = 0,	/* 4 fast dw */
	DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
	DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw,3 slow dw */
	DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3,	/* 4 slow dw */
	DBG_BUS_SEMI_NUM_FRAME_MODES
};

/* Debug bus filter types */
enum dbg_bus_filter_types {
	DBG_BUS_FILTER_TYPE_OFF,	/* Filter always off */
	DBG_BUS_FILTER_TYPE_PRE,	/* Filter before trigger only */
	DBG_BUS_FILTER_TYPE_POST,	/* Filter after trigger only */
	DBG_BUS_FILTER_TYPE_ON		/* Filter always on */
};

/* Debug bus pre-trigger recording types */
enum dbg_bus_pre_trigger_types {
	DBG_BUS_PRE_TRIGGER_FROM_ZERO,	/* Record from time 0 */
	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,	/* Record some chunks before trigger */
	DBG_BUS_PRE_TRIGGER_DROP	/* Drop data before trigger */
};

/* Debug bus post-trigger recording types */
enum dbg_bus_post_trigger_types {
	DBG_BUS_POST_TRIGGER_RECORD,	/* Start recording after trigger */
	DBG_BUS_POST_TRIGGER_DROP	/* Drop data after trigger */
};

/* Debug bus other engine mode */
enum dbg_bus_other_engine_modes {
	DBG_BUS_OTHER_ENGINE_MODE_NONE,
	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
};
/* DBG block Framing mode definitions */
struct framing_mode_defs {
	u8 id;			/* a dbg_bus_frame_modes value */
	u8 blocks_dword_mask;	/* mask of dwords carrying HW block data */
	u8 storms_dword_mask;	/* mask of dwords carrying Storm data */
	u8 semi_framing_mode_id; /* a dbg_bus_semi_frame_modes value */
	u8 full_buf_thr;
};

/* Chip constant definitions */
struct chip_defs {
	const char *name;
	u8 dwords_per_cycle;
	u8 num_framing_modes;
	u32 num_ilt_pages;
	struct framing_mode_defs *framing_modes;
};

/* HW type constant definitions */
struct hw_type_defs {
	const char *name;
	u32 delay_factor;
	u32 dmae_thresh;
	u32 log_thresh;
};

/* RBC reset definitions */
struct rbc_reset_defs {
	u32 reset_reg_addr;
	u32 reset_val[MAX_CHIP_IDS];	/* per-chip value written to the reg */
};

/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;			/* single-letter Storm name (T/M/U/X/Y/P) */
	enum block_id sem_block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_gpre_vect_addr;
	u32 cm_ctx_wr_addr;
	/* Indexed by enum cm_ctx_types */
	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};

/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;
	bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;
	u32 src_disable_reg_addr;
	u32 src_enable_val;
	bool exists[MAX_CHIP_IDS];
};

/* GRC dump parameter definitions - see s_grc_param_defs for field order */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];
	u32 min;
	u32 max;
	bool is_preset;
	bool is_persistent;
	u32 exclude_all_preset_val;
	u32 crash_preset_val[MAX_CHIP_IDS];
};

/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];
};

struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 is_256b_reg_addr;
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};

struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};

/* Split type definitions */
struct split_type_defs {
	const char *name;
};
/******************************** Constants **********************************/

#define BYTES_IN_DWORD sizeof (u32)

/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	((int )(FIELD_BIT_OFFSET(type, field) / 32))
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Clears then sets the named bit-field inside the dword array 'var' */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= \
		(~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

/* NOTE: relies on a loop variable 'i' already declared by the caller */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr, (arr)[i]); \
	} while (0)

#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)

/* extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block) \
	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
#define NUM_DBG_LINES(block) \
	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))

#define USE_DMAE true
#define PROTECT_WIDE_BUS true

#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT 24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof (struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof (struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof (struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof (struct dbg_idle_chk_result_reg_hdr))

#define PAGE_MEM_DESC_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof (struct phys_mem_desc))

#define IDLE_CHK_MAX_ENTRIES_SIZE 32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE 64
#define VFC_CAM_CMD_ROW_OFFSET 48
#define VFC_CAM_CMD_ROW_SIZE 9
#define VFC_CAM_ADDR_STRUCT_SIZE 16
#define VFC_CAM_ADDR_OP_OFFSET 0
#define VFC_CAM_ADDR_OP_SIZE 4
#define VFC_CAM_RESP_STRUCT_SIZE 256
#define VFC_RAM_ADDR_STRUCT_SIZE 16
#define VFC_RAM_ADDR_OP_OFFSET 0
#define VFC_RAM_ADDR_OP_SIZE 2
#define VFC_RAM_ADDR_ROW_OFFSET 2
#define VFC_RAM_ADDR_ROW_SIZE 10
#define VFC_RAM_RESP_STRUCT_SIZE 256

#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES 4

#define VFC_CAM_NUM_ROWS 512

#define VFC_OPCODE_CAM_RD 14
#define VFC_OPCODE_RAM_RD 0

#define NUM_RSS_MEM_TYPES 5

#define NUM_BIG_RAM_TYPES 3
#define BIG_RAM_NAME_LEN 3

#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)

#define RESET_REG_UNRESET_OFFSET 4

#define STALL_DELAY_MS 500

#define STATIC_DEBUG_LINE_DWORDS 9

#define NUM_COMMON_GLOBAL_PARAMS 10

#define MAX_RECURSION_DEPTH 10

/* FW image IDs - must match fw_info.ver.image_id values read from the chip */
#define FW_IMG_KUKU 0
#define FW_IMG_MAIN 1
#define FW_IMG_L2B 2

#define REG_FIFO_ELEMENT_DWORDS 2
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#define IGU_FIFO_ELEMENT_DWORDS 4
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

#define MAX_SW_PLTAFORM_STR_SIZE 64

#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
/* DBG block framing mode definitions, in descending preference order.
 * Fields: {id, blocks_dword_mask, storms_dword_mask, semi_framing_mode_id,
 * full_buf_thr}.
 */
static struct framing_mode_defs s_framing_mode_defs[4] = {
	{DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
	 DBG_BUS_SEMI_FRAME_MODE_4FAST,
	 10},
	{DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
	 10},
	{DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
	 DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
	{DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
	 DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
};
/* Chip constant definitions array, indexed by enum chip_ids.
 * Fields: {name, dwords_per_cycle, num_framing_modes, num_ilt_pages,
 * framing_modes}.
 */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{"bb" , 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
	 s_framing_mode_defs},
	{"ah" , 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
	 s_framing_mode_defs}
};
/* Storm constant definitions array - one entry per Storm, positional
 * initializers in struct storm_defs field order (letter, SEM block,
 * per-chip debug clients, VFC presence, SEM/CM register addresses,
 * per-context-type read addresses and per-chip LID sizes).
 */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T' , BLOCK_TSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	 true ,
	 TSEM_REG_FAST_MEMORY,
	 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
	 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
	 TCM_REG_CTX_RBC_ACCS,
	 {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
	  TCM_REG_SM_TASK_CTX},
	 {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
	},

	/* Mstorm */
	{'M' , BLOCK_MSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	 false ,
	 MSEM_REG_FAST_MEMORY,
	 MSEM_REG_DBG_FRAME_MODE,
	 MSEM_REG_SLOW_DBG_ACTIVE,
	 MSEM_REG_SLOW_DBG_MODE,
	 MSEM_REG_DBG_MODE1_CFG,
	 MSEM_REG_SYNC_DBG_EMPTY,
	 MSEM_REG_DBG_GPRE_VECT,
	 MCM_REG_CTX_RBC_ACCS,
	 {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
	  MCM_REG_SM_TASK_CTX },
	 {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/
	},

	/* Ustorm */
	{'U' , BLOCK_USEM,
	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	 false ,
	 USEM_REG_FAST_MEMORY,
	 USEM_REG_DBG_FRAME_MODE,
	 USEM_REG_SLOW_DBG_ACTIVE,
	 USEM_REG_SLOW_DBG_MODE,
	 USEM_REG_DBG_MODE1_CFG,
	 USEM_REG_SYNC_DBG_EMPTY,
	 USEM_REG_DBG_GPRE_VECT,
	 UCM_REG_CTX_RBC_ACCS,
	 {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
	  UCM_REG_SM_TASK_CTX},
	 {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
	},

	/* Xstorm - has no task contexts (AG/ST task entries are zero) */
	{'X' , BLOCK_XSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	 false ,
	 XSEM_REG_FAST_MEMORY,
	 XSEM_REG_DBG_FRAME_MODE,
	 XSEM_REG_SLOW_DBG_ACTIVE,
	 XSEM_REG_SLOW_DBG_MODE,
	 XSEM_REG_DBG_MODE1_CFG,
	 XSEM_REG_SYNC_DBG_EMPTY,
	 XSEM_REG_DBG_GPRE_VECT,
	 XCM_REG_CTX_RBC_ACCS,
	 {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
	 {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
	},

	/* Ystorm */
	{'Y' , BLOCK_YSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	 false ,
	 YSEM_REG_FAST_MEMORY,
	 YSEM_REG_DBG_FRAME_MODE,
	 YSEM_REG_SLOW_DBG_ACTIVE,
	 YSEM_REG_SLOW_DBG_MODE,
	 YSEM_REG_DBG_MODE1_CFG,
	 YSEM_REG_SYNC_DBG_EMPTY,
	 YSEM_REG_DBG_GPRE_VECT,
	 YCM_REG_CTX_RBC_ACCS,
	 {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
	  YCM_REG_SM_TASK_CTX},
	 {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
	},

	/* Pstorm - only the SM connection context is readable */
	{'P' , BLOCK_PSEM,
	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	 true ,
	 PSEM_REG_FAST_MEMORY,
	 PSEM_REG_DBG_FRAME_MODE,
	 PSEM_REG_SLOW_DBG_ACTIVE,
	 PSEM_REG_SLOW_DBG_MODE,
	 PSEM_REG_DBG_MODE1_CFG,
	 PSEM_REG_SYNC_DBG_EMPTY,
	 PSEM_REG_DBG_GPRE_VECT,
	 PCM_REG_CTX_RBC_ACCS,
	 {0, PCM_REG_SM_CON_CTX, 0, 0},
	 {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
	},
};
/* HW type definitions, indexed by enum hw_types.
 * Fields: {name, delay_factor, dmae_thresh, log_thresh}.
 */
static struct hw_type_defs s_hw_type_defs[] = {
	/* HW_TYPE_ASIC */
	{"asic" , 1, 256, 32768},
	{"reserved" , 0, 0, 0},
	{"reserved2" , 0, 0, 0},
	{"reserved3" , 0, 0, 0},
	{"reserved4" , 0, 0, 0}
};
/* GRC parameter definitions, indexed by enum dbg_grc_params.
 * Fields: {{default per chip}, min, max, is_preset, is_persistent,
 * exclude_all_preset_val, {crash preset per chip}}.
 */
static struct grc_param_defs s_grc_param_defs[] = {
	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1}, 0, 1, false , false , 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1}, 0, 1, false , false , 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1}, 0, 1, false , false , 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1}, 0, 1, false , false , 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1}, 0, 1, false , false , 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1}, 0, 1, false , false , 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_VFC */
	{{0, 0}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DORQ */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IGU */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BMB */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED1 */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_UNSTALL */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED2 */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
	{{0, 0}, 1, 0xffffffff, false , true , 0, {0, 0}},

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0}, 0, 1, true , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_CRASH */
	{{0, 0}, 0, 1, true , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1}, 0, 1, false , false , 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PHY */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_MCP */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_FW_VER */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED3 */
	{{0, 0}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
	{{0, 1}, 0, 1, false , false , 0, {0, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
	{{1, 1}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
	{{1, 1}, 0, 1, false , false , 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
	{{0, 0}, 0, 1, false , false , 0, {1, 1}}
};
/* RSS memory definitions.
 * Fields: {mem_name, type_name, addr (128b units), entry_width (bits),
 * {num_entries per chip: bb, k2}}.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{"rss_mem_cid" , "rss_cid" , 0, 32,
	 {256, 320}},

	{"rss_mem_key_msb" , "rss_key" , 1024, 256,
	 {128, 208}},

	{"rss_mem_key_lsb" , "rss_key" , 2048, 64,
	 {128, 208}},

	{"rss_mem_info" , "rss_info" , 3072, 16,
	 {128, 208}},

	{"rss_mem_ind" , "rss_ind" , 4096, 16,
	 {16384, 26624}}
};
/* VFC RAM definitions. Fields: {mem_name, type_name, base_row, num_rows}. */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1" , "vfc_ram" , 0, 512},
	{"vfc_ram_mtt2" , "vfc_ram" , 512, 128},
	{"vfc_ram_stt2" , "vfc_ram" , 640, 32},
	{"vfc_ram_ro_vect" , "vfc_ram" , 672, 32}
};
/* Big RAM definitions - positional initializers in struct big_ram_defs
 * field order; last two arrays are per-chip {bb, k2} values.
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{"BRB" , MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 0},
	 {153600, 180224}},

	{"BTB" , MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 1},
	 {92160, 117760}},

	{"BMB" , MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	 MISCS_REG_BLOCK_256B_EN, {0, 0},
	 {36864, 36864}}
};
/* RBC reset definitions. Fields: {reset_reg_addr, {reset_val: bb, k2}}. */
static struct rbc_reset_defs s_rbc_reset_defs[] = {
	{MISCS_REG_RESET_PL_HV,
	 {0x0, 0x400}},
	{MISC_REG_RESET_PL_PDA_VMAIN_1,
	 {0x4404040, 0x4404040}},
	{MISC_REG_RESET_PL_PDA_VMAIN_2,
	 {0x7, 0x7c00007}},
	{MISC_REG_RESET_PL_PDA_VAUX,
	 {0x2, 0x2}},
};
/* PHY definitions (K2 only - all register names are _K2 suffixed).
 * Fields: {phy_name, base_addr, tbus_addr_lo, tbus_addr_hi, tbus_data_lo,
 * tbus_data_hi}.
 */
static struct phy_defs s_phy_defs[] = {
	{"nw_phy" , NWS_REG_NWS_CMU_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
	{"sgmii_phy" , MS_REG_MS_CMU_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy0" , PHY_PCIE_REG_PHY0_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy1" , PHY_PCIE_REG_PHY1_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
/* Split type names, indexed by enum split_types.
 * NOTE: SPLIT_TYPE_PORT and SPLIT_TYPE_PORT_PF intentionally share the
 * "port" name.
 */
static struct split_type_defs s_split_type_defs[] = {
	/* SPLIT_TYPE_NONE */
	{"eng" },

	/* SPLIT_TYPE_PORT */
	{"port" },

	/* SPLIT_TYPE_PF */
	{"pf" },

	/* SPLIT_TYPE_PORT_PF */
	{"port" },

	/* SPLIT_TYPE_VF */
	{"vf" }
};
/******************************** Variables **********************************/
/* The version of the calling app. Must be set to a non-zero value before
 * use - qed_dbg_dev_init() fails with DBG_STATUS_APP_VERSION_NOT_SET while
 * it is zero.
 */
static u32 s_app_ver;
/**************************** Private Functions ******************************/
/* Placeholder for compile-time (static) assertions; intentionally empty
 * in this build.
 */
static void qed_static_asserts(void )
{
}
/* Reads and returns a single dword from the specified unaligned buffer.
 * Uses memcpy so the access is safe regardless of buffer alignment.
 */
static u32 qed_read_unaligned_dword(u8 *buf)
{
	u32 val;

	memcpy(&val, buf, sizeof(val));

	return val;
}
/* Sets the value of the specified GRC param for this hwfn */
static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
			      enum dbg_grc_params grc_param, u32 val)
{
	p_hwfn->dbg_info.grc.param_val[grc_param] = val;
}
/* Returns the current value of the specified GRC param for this hwfn */
static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
			     enum dbg_grc_params grc_param)
{
	return p_hwfn->dbg_info.grc.param_val[grc_param];
}
/* Initializes the GRC parameters to their defaults, once per hwfn */
static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	if (dev_data->grc.params_initialized)
		return;

	qed_dbg_grc_set_params_default(p_hwfn);
	dev_data->grc.params_initialized = 1;
}
/* Records the pointer and size of the specified binary debug buffer */
static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
				enum bin_dbg_buffer_type buf_type,
				const u32 *ptr, u32 size)
{
	struct virt_mem_desc *desc = &p_hwfn->dbg_arrays[buf_type];

	desc->size = size;
	desc->ptr = (void *)ptr;
}
/* Initializes debug data for the specified device.
 * Detects the chip type, fills in VF/PF/port counts, enables the matching
 * mode flags, and initializes the GRC parameters. Idempotent: returns
 * immediately once dev_data->initialized is set.
 *
 * Return: DBG_STATUS_OK on success, DBG_STATUS_APP_VERSION_NOT_SET if
 * s_app_ver was never set, or DBG_STATUS_UNKNOWN_CHIP for an unsupported
 * chip.
 */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 num_pfs = 0, max_pfs_per_port = 0;

	if (dev_data->initialized)
		return DBG_STATUS_OK;

	/* The calling application must register its version first */
	if (!s_app_ver)
		return DBG_STATUS_APP_VERSION_NOT_SET;

	/* Set chip */
	if (QED_IS_K2(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_K2;
		num_pfs = MAX_NUM_PFS_K2;
		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_BB;
		dev_data->mode_enable[MODE_BB] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_BB;
		num_pfs = MAX_NUM_PFS_BB;
		max_pfs_per_port = MAX_NUM_PFS_BB;
	} else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

	/* Set HW type (only ASIC is supported) */
	dev_data->hw_type = HW_TYPE_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;

	/* Set port mode */
	switch (p_hwfn->cdev->num_ports_in_engine) {
	case 1:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
		break ;
	case 2:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
		break ;
	case 4:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
		break ;
	}

	/* Set 100G mode */
	if (QED_IS_CMT(p_hwfn->cdev))
		dev_data->mode_enable[MODE_100G] = 1;

	/* Set number of ports (derived from the mode flags set above) */
	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
	    dev_data->mode_enable[MODE_100G])
		dev_data->num_ports = 1;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
		dev_data->num_ports = 2;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
		dev_data->num_ports = 4;

	/* Set number of PFs per port, capped at the per-chip maximum */
	dev_data->num_pfs_per_port = min_t(u32,
					   num_pfs / dev_data->num_ports,
					   max_pfs_per_port);

	/* Initializes the GRC parameters */
	qed_dbg_grc_init_params(p_hwfn);

	dev_data->use_dmae = true ;
	dev_data->initialized = 1;

	return DBG_STATUS_OK;
}
/* Returns a pointer to the debug block descriptor for the given block ID */
static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
					     enum block_id block_id)
{
	const struct dbg_block *blocks =
		p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;

	return &blocks[block_id];
}
static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
*p_hwfn,
enum block_id
block_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
return (const struct dbg_block_chip *)
p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
block_id * MAX_CHIP_IDS + dev_data->chip_id;
}
/* Returns the per-chip reset register descriptor for the given reset reg ID.
 * The array is laid out as [reset_reg_id][chip_id].
 */
static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
							 *p_hwfn,
							 u8 reset_reg_id)
{
	const struct dbg_reset_reg *reset_regs;
	u32 idx;

	reset_regs = p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr;
	idx = reset_reg_id * MAX_CHIP_IDS + p_hwfn->dbg_info.chip_id;

	return reset_regs + idx;
}
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 * On any failure (zero or oversized size field) fw_info is left zeroed.
 */
static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u8 storm_id, struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, size, *dest;

	memset(&fw_info_location, 0, sizeof (fw_info_location));
	memset(fw_info, 0, sizeof (*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
	       sizeof (fw_info_location);

	dest = (u32 *)&fw_info_location;
	size = BYTES_TO_DWORDS(sizeof (fw_info_location));

	/* Dword-by-dword GRC read of the location descriptor */
	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* Read FW version info from Storm RAM */
	size = le32_to_cpu(fw_info_location.size);
	if (!size || size > sizeof (*fw_info))
		return ;	/* invalid descriptor - leave fw_info zeroed */

	addr = le32_to_cpu(fw_info_location.grc_addr);
	dest = (u32 *)fw_info;
	size = BYTES_TO_DWORDS(size);

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
}
/* Dumps the specified string (including its NUL terminator) to the
 * specified buffer. When 'dump' is false, nothing is written but the size
 * is still computed. Returns the dumped size in bytes.
 */
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
	size_t len = strlen(str);

	if (dump)
		memcpy(dump_buf, str, len + 1);

	return (u32)len + 1;
}
/* Dumps zero padding to align the specified buffer offset to the next
 * dword boundary. When 'dump' is false, nothing is written but the padding
 * size is still computed. Returns the dumped size in bytes (0..3).
 */
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
	u8 misalign = (u8)(byte_offset % 4);
	u8 pad = misalign ? (u8)(4 - misalign) : 0;

	if (dump && pad)
		memset(dump_buf, 0, pad);

	return pad;
}
/* Writes the specified string param to the specified buffer as:
 * name\0 <0x1 marker byte> value\0 <zero padding to dword boundary>.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_str_param(u32 *dump_buf,
			      bool dump,
			      const char *param_name, const char *param_val)
{
	char *cbuf = (char *)dump_buf;
	u32 len = 0;

	/* Dump param name */
	len += qed_dump_str(cbuf + len, dump, param_name);

	/* A single 0x1 byte marks the value as a string */
	if (dump)
		cbuf[len] = 1;
	len++;

	/* Dump param value */
	len += qed_dump_str(cbuf + len, dump, param_val);

	/* Align buffer to next dword */
	len += qed_dump_align(cbuf + len, dump, len);

	return BYTES_TO_DWORDS(len);
}
/* Writes the specified numeric param to the specified buffer as:
 * name\0 <0x0 marker byte> <padding to dword boundary> <value dword>.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_num_param(u32 *dump_buf,
			      bool dump, const char *param_name, u32 param_val)
{
	char *cbuf = (char *)dump_buf;
	u32 len = 0;

	/* Dump param name */
	len += qed_dump_str(cbuf + len, dump, param_name);

	/* A single 0x0 byte marks the value as numeric */
	if (dump)
		cbuf[len] = 0;
	len++;

	/* Align buffer to next dword */
	len += qed_dump_align(cbuf + len, dump, len);

	/* Dump param value (and change offset from bytes to dwords) */
	len = BYTES_TO_DWORDS(len);
	if (dump)
		dump_buf[len] = param_val;
	len++;

	return len;
}
/* Reads the FW version and writes it as a param to the specified buffer.
 * Emits three params: "fw-version" (string), "fw-image" (string) and
 * "fw-timestamp" (numeric). When not dumping, or when the NO_FW_VER GRC
 * param is set, placeholder values are emitted instead of reading the chip.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { {0}, {0} };
	u32 offset = 0;

	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW info from chip */
		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);

		/* Create FW version/image strings */
		if (snprintf(fw_ver_str, sizeof (fw_ver_str),
			     "%d_%d_%d_%d" , fw_info.ver.num.major,
			     fw_info.ver.num.minor, fw_info.ver.num.rev,
			     fw_info.ver.num.eng) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid FW version string\n" );
		switch (fw_info.ver.image_id) {
		case FW_IMG_KUKU:
			strcpy(fw_img_str, "kuku" );
			break ;
		case FW_IMG_MAIN:
			strcpy(fw_img_str, "main" );
			break ;
		case FW_IMG_L2B:
			strcpy(fw_img_str, "l2b" );
			break ;
		default :
			strcpy(fw_img_str, "unknown" );
			break ;
		}
	}

	/* Dump FW version, image and timestamp */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-version" , fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image" , fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp" ,
				     le32_to_cpu(fw_info.ver.timestamp));

	return offset;
}
/* Reads the MFW version and writes it as a param to the specified buffer.
 * The version is read from the MCP public global section in scratchpad
 * memory. When not dumping, or when the NO_FW_VER GRC param is set, a
 * placeholder string is emitted instead. Returns the dumped size in dwords.
 */
static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 global_section_offsize, global_section_addr, mfw_ver;
		u32 public_data_addr, global_section_offsize_addr;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = qed_rd(p_hwfn,
					  p_ptt,
					  MISC_REG_SHARED_MEM_ADDR) |
				   MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr +
					      offsetof(struct mcp_public_data,
						       sections) +
					      sizeof (offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = qed_rd(p_hwfn, p_ptt,
						global_section_offsize_addr);
		global_section_addr =
			MCP_REG_SCRATCH +
			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = qed_rd(p_hwfn, p_ptt,
				 global_section_addr +
				 offsetof(struct public_global, mfw_ver));

		/* Dump MFW version param - one byte per version component,
		 * MSB first.
		 */
		if (snprintf(mfw_ver_str, sizeof (mfw_ver_str), "%d_%d_%d_%d" ,
			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid MFW version string\n" );
	}

	return qed_dump_str_param(dump_buf, dump, "mfw-version" , mfw_ver_str);
}
/* Reads the chip revision from the chip and writes it as a param to the
 * specified buffer, formatted as a two-character string (revision letter
 * followed by metal digit, e.g. "b0"); "??" is emitted for non-ASIC HW
 * types. Returns the dumped size in dwords.
 */
static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char param_str[3] = "??" ;

	if (dev_data->hw_type == HW_TYPE_ASIC) {
		u32 chip_rev, chip_metal;

		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);

		param_str[0] = 'a' + (u8)chip_rev;
		param_str[1] = '0' + (u8)chip_metal;
	}

	return qed_dump_str_param(dump_buf, dump, "chip-revision" , param_str);
}
/* Writes a section header to the specified buffer. A section header is
 * simply a numeric param whose name is the section name and whose value is
 * the number of params that follow. Returns the dumped size in dwords.
 */
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}
/* Writes the common global params to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump,
					 u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 total_params;
	u32 offset;

	/* Param count = common params + caller-specific params, plus the
	 * "path" param that is dumped only on BB chips.
	 */
	total_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
	if (dev_data->chip_id == CHIP_BB)
		total_params++;

	/* Dump global params section header */
	offset = qed_dump_section_hdr(dump_buf,
				      dump, "global_params", total_params);

	/* Store params */
	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += qed_dump_mfw_ver_param(p_hwfn, p_ptt,
					 dump_buf + offset, dump);
	offset += qed_dump_chip_revision_param(p_hwfn, p_ptt,
					       dump_buf + offset, dump);
	offset += qed_dump_num_param(dump_buf + offset, dump,
				     "tools-version", TOOLS_VERSION);
	offset += qed_dump_str_param(dump_buf + offset, dump, "chip",
				     s_chip_defs[dev_data->chip_id].name);
	offset += qed_dump_str_param(dump_buf + offset, dump, "platform",
				     s_hw_type_defs[dev_data->hw_type].name);
	offset += qed_dump_num_param(dump_buf + offset, dump, "pci-func",
				     p_hwfn->abs_pf_id);
	offset += qed_dump_num_param(dump_buf + offset, dump, "epoch",
				     qed_get_epoch_time());
	if (dev_data->chip_id == CHIP_BB)
		offset += qed_dump_num_param(dump_buf + offset, dump, "path",
					     QED_PATH_ID(p_hwfn));

	return offset;
}
/* Writes the "last" section (including CRC) to the specified buffer at the
 * given offset. Returns the dumped size in dwords.
 */
static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
	u32 size = 0;

	/* The "last" section header carries no params */
	size += qed_dump_section_hdr(dump_buf + offset + size,
				     dump, "last", 0);

	/* The dword after the header holds the (inverted) CRC32 of the whole
	 * dump so far, including the "last" section header itself.
	 */
	if (dump)
		dump_buf[offset + size] =
		    ~crc32(0xffffffff,
			   (u8 *)dump_buf, DWORDS_TO_BYTES(offset + size));
	size++;

	return size;
}
/* Refreshes dev_data->block_in_reset[] by sampling the chip's reset
 * registers and testing each physical block's reset bit.
 */
static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 rst_val[NUM_DBG_RESET_REGS] = { 0 };
	u32 blk_idx;
	u8 reg_idx;

	/* Sample every reset register that exists on this chip */
	for (reg_idx = 0; reg_idx < NUM_DBG_RESET_REGS; reg_idx++) {
		const struct dbg_reset_reg *rst;
		u32 rst_addr;

		rst = qed_get_dbg_reset_reg(p_hwfn, reg_idx);
		if (GET_FIELD(rst->data, DBG_RESET_REG_IS_REMOVED))
			continue;

		rst_addr = DWORDS_TO_BYTES(GET_FIELD(rst->data,
						     DBG_RESET_REG_ADDR));
		rst_val[reg_idx] = qed_rd(p_hwfn, p_ptt, rst_addr);
	}

	/* A block is in reset when its bit in its reset register is clear */
	for (blk_idx = 0; blk_idx < NUM_PHYS_BLOCKS; blk_idx++) {
		const struct dbg_block_chip *blk;

		blk = qed_get_dbg_block_per_chip(p_hwfn,
						 (enum block_id)blk_idx);
		if (GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED) ||
		    !GET_FIELD(blk->flags, DBG_BLOCK_CHIP_HAS_RESET_REG))
			continue;

		dev_data->block_in_reset[blk_idx] =
		    !(rst_val[blk->reset_reg_id] &
		      BIT(blk->reset_reg_bit_offset));
	}
}
/* Recursive worker for qed_is_mode_match(): evaluates the prefix-encoded
 * modes tree starting at *modes_buf_offset, advancing the offset as
 * elements are consumed. Returns false (and logs) past the max depth,
 * which indicates a corrupt init/debug buffer.
 */
static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
				  u16 *modes_buf_offset, u8 rec_depth)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 *tree_buf;
	u8 op;

	if (rec_depth > MAX_RECURSION_DEPTH) {
		DP_NOTICE(p_hwfn,
			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
		return false;
	}

	/* Consume the next tree element */
	tree_buf = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
	op = tree_buf[(*modes_buf_offset)++];

	if (op == INIT_MODE_OP_NOT)
		return !qed_is_mode_match_rec(p_hwfn,
					      modes_buf_offset,
					      rec_depth + 1);

	if (op == INIT_MODE_OP_OR || op == INIT_MODE_OP_AND) {
		bool lhs, rhs;

		lhs = qed_is_mode_match_rec(p_hwfn,
					    modes_buf_offset, rec_depth + 1);
		rhs = qed_is_mode_match_rec(p_hwfn,
					    modes_buf_offset, rec_depth + 1);
		return (op == INIT_MODE_OP_OR) ? (lhs || rhs) : (lhs && rhs);
	}

	/* Leaf: a mode index, biased by the number of operator opcodes */
	return dev_data->mode_enable[op - MAX_INIT_MODE_OPS] > 0;
}
/* Returns true if the mode (specified using modes_buf_offset) is enabled */
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	/* Start the recursive tree evaluation at depth 0 */
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
}
/* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, bool enable)
{
	u32 val = enable ? 1 : 0;

	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, val);
}
/* Resets the Debug block by pulsing its bit in the associated reset
 * register (clear the bit, then restore the original register value).
 */
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	const struct dbg_block_chip *dbg_blk;
	const struct dbg_reset_reg *rst_reg;
	u32 rst_addr, old_val, new_val;

	dbg_blk = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
	rst_reg = qed_get_dbg_reset_reg(p_hwfn, dbg_blk->reset_reg_id);
	rst_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
					     DBG_RESET_REG_ADDR));

	old_val = qed_rd(p_hwfn, p_ptt, rst_addr);
	new_val = old_val & ~BIT(dbg_blk->reset_reg_bit_offset);

	/* Assert reset, then take the block back out of reset */
	qed_wr(p_hwfn, p_ptt, rst_addr, new_val);
	qed_wr(p_hwfn, p_ptt, rst_addr, old_val);
}
/* Enable / disable Debug Bus clients according to the specified mask
 * (1 = enable, 0 = disable).
 */
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
{
	/* The mask is written as-is; each bit controls one client */
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
/* Configures one block's debug-bus line: selects the line, then programs
 * the dword-enable mask, right shift, force-valid and force-frame masks.
 * All block register addresses are stored in dwords and converted to bytes
 * before the write.
 */
static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum block_id block_id,
				    u8 line_id,
				    u8 enable_mask,
				    u8 right_shift,
				    u8 force_valid_mask, u8 force_frame_mask)
{
	const struct dbg_block_chip *blk;

	blk = qed_get_dbg_block_per_chip(p_hwfn, block_id);

	qed_wr(p_hwfn, p_ptt,
	       DWORDS_TO_BYTES(blk->dbg_select_reg_addr), line_id);
	qed_wr(p_hwfn, p_ptt,
	       DWORDS_TO_BYTES(blk->dbg_dword_enable_reg_addr), enable_mask);
	qed_wr(p_hwfn, p_ptt,
	       DWORDS_TO_BYTES(blk->dbg_shift_reg_addr), right_shift);
	qed_wr(p_hwfn, p_ptt,
	       DWORDS_TO_BYTES(blk->dbg_force_valid_reg_addr),
	       force_valid_mask);
	qed_wr(p_hwfn, p_ptt,
	       DWORDS_TO_BYTES(blk->dbg_force_frame_reg_addr),
	       force_frame_mask);
}
/* Disable debug bus in all blocks: for every present, out-of-reset block
 * with debug-bus support whose mode matches, zero its dword-enable register.
 */
static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 blk_idx;

	for (blk_idx = 0; blk_idx < MAX_BLOCK_ID; blk_idx++) {
		const struct dbg_block_chip *blk;
		u16 modes_buf_offset;
		bool eval_mode;

		blk = qed_get_dbg_block_per_chip(p_hwfn,
						 (enum block_id)blk_idx);

		/* Skip removed blocks, blocks in reset, and blocks with no
		 * debug bus.
		 */
		if (GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED) ||
		    dev_data->block_in_reset[blk_idx])
			continue;
		if (!GET_FIELD(blk->flags, DBG_BLOCK_CHIP_HAS_DBG_BUS))
			continue;

		eval_mode = GET_FIELD(blk->dbg_bus_mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(blk->dbg_bus_mode.data,
					     DBG_MODE_HDR_MODES_BUF_OFFSET);

		/* Disable debug bus only when the mode condition holds */
		if (!eval_mode ||
		    qed_is_mode_match(p_hwfn, &modes_buf_offset))
			qed_wr(p_hwfn, p_ptt,
			       DWORDS_TO_BYTES(blk->dbg_dword_enable_reg_addr),
			       0);
	}
}
/* Returns true if the specified entity (indicated by GRC param) should be
 * included in the dump, false otherwise.
 */
static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
				enum dbg_grc_params grc_param)
{
	/* A non-zero param value means "include" */
	return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
/* Returns the storm_id that matches the specified Storm letter,
 * or MAX_DBG_STORMS if the letter matches no known Storm.
 */
static enum dbg_storms qed_get_id_from_letter(char storm_letter)
{
	u8 idx;

	/* Linear scan of the Storm definitions table */
	for (idx = 0; idx < MAX_DBG_STORMS; idx++) {
		if (s_storm_defs[idx].letter == storm_letter)
			return (enum dbg_storms)idx;
	}

	return MAX_DBG_STORMS;
}
/* Returns true if the specified Storm should be included in the dump,
 * false otherwise.
 */
static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
				      enum dbg_storms storm)
{
	/* Storm GRC params share the storm_id numbering */
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
}
/* Returns true if the specified memory (identified by owning block and
 * memory group) should be included in the dump, false otherwise.
 * Decision is made in three stages: associated-Storm filter, Big-RAM
 * filter, then a per-memory-group GRC parameter lookup.
 */
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
enum block_id block_id, u8 mem_group_id)
{
const struct dbg_block *block;
u8 i;
block = get_dbg_block(p_hwfn, block_id);
/* If the block is associated with a Storm, check Storm match */
if (block->associated_storm_letter) {
enum dbg_storms associated_storm_id =
qed_get_id_from_letter(block->associated_storm_letter);
/* Unknown Storm letter, or the Storm is excluded -> exclude the memory */
if (associated_storm_id == MAX_DBG_STORMS ||
!qed_grc_is_storm_included(p_hwfn, associated_storm_id))
return false ;
}
/* Big-RAM memories are governed by their own dedicated GRC param */
for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
struct big_ram_defs *big_ram = &s_big_ram_defs[i];
if (mem_group_id == big_ram->mem_group_id ||
mem_group_id == big_ram->ram_mem_group_id)
return qed_grc_is_included(p_hwfn, big_ram->grc_param);
}
/* Map the memory group to the GRC param controlling its inclusion */
switch (mem_group_id) {
case MEM_GROUP_PXP_ILT:
case MEM_GROUP_PXP_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
case MEM_GROUP_RAM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
case MEM_GROUP_PBUF:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
case MEM_GROUP_CAU_MEM:
case MEM_GROUP_CAU_SB:
case MEM_GROUP_CAU_PI:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
case MEM_GROUP_CAU_MEM_EXT:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
case MEM_GROUP_QM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
/* CFC memories are included if either the CFC or the CM-context
 * param is set.
 */
case MEM_GROUP_CFC_MEM:
case MEM_GROUP_CONN_CFC_MEM:
case MEM_GROUP_TASK_CFC_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
case MEM_GROUP_DORQ_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
case MEM_GROUP_IGU_MEM:
case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
case MEM_GROUP_MULD_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
case MEM_GROUP_PRS_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
case MEM_GROUP_DMAE_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
case MEM_GROUP_TM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
case MEM_GROUP_SDM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
case MEM_GROUP_TDIF_CTX:
case MEM_GROUP_RDIF_CTX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
case MEM_GROUP_CM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
case MEM_GROUP_IOR:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
default :
/* Memory groups with no controlling param are always included */
return true ;
}
}
/* Stalls (or un-stalls) all included Storms by writing the SEM fast-memory
 * stall register of each, then waits for the stall to take effect.
 */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, bool stall)
{
	u8 storm_idx;

	for (storm_idx = 0; storm_idx < MAX_DBG_STORMS; storm_idx++) {
		u32 stall_addr;

		if (!qed_grc_is_storm_included(p_hwfn,
					       (enum dbg_storms)storm_idx))
			continue;

		stall_addr = s_storm_defs[storm_idx].sem_fast_mem_addr +
		    SEM_FAST_REG_STALL_0;
		qed_wr(p_hwfn, p_ptt, stall_addr, stall ? 1 : 0);
	}

	/* Give the Storms time to actually stall / resume */
	msleep(STALL_DELAY_MS);
}
/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
 * taken out of reset.
 */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, bool rbc_only)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 chip_id = dev_data->chip_id;
	u32 i;

	/* Take RBCs out of reset */
	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++) {
		if (!s_rbc_reset_defs[i].reset_val[chip_id])
			continue;

		qed_wr(p_hwfn, p_ptt,
		       s_rbc_reset_defs[i].reset_reg_addr +
		       RESET_REG_UNRESET_OFFSET,
		       s_rbc_reset_defs[i].reset_val[chip_id]);
	}

	if (rbc_only)
		return;

	{
		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
		u8 reset_reg_id;
		u32 blk_idx;

		/* Collect, per reset register, the bits of all blocks that
		 * must be un-reset before dumping.
		 */
		for (blk_idx = 0; blk_idx < NUM_PHYS_BLOCKS; blk_idx++) {
			const struct dbg_block_chip *blk;

			blk = qed_get_dbg_block_per_chip(p_hwfn,
							 (enum block_id)
							 blk_idx);
			if (GET_FIELD(blk->flags,
				      DBG_BLOCK_CHIP_IS_REMOVED))
				continue;
			if (!GET_FIELD(blk->flags,
				       DBG_BLOCK_CHIP_HAS_RESET_REG))
				continue;
			if (!GET_FIELD(blk->flags,
				       DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP))
				continue;

			reg_val[blk->reset_reg_id] |=
			    BIT(blk->reset_reg_bit_offset);
		}

		/* Write the accumulated un-reset masks */
		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		     reset_reg_id++) {
			const struct dbg_reset_reg *rst;
			u32 rst_addr;

			rst = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
			if (GET_FIELD(rst->data, DBG_RESET_REG_IS_REMOVED))
				continue;
			if (!reg_val[reset_reg_id])
				continue;

			rst_addr = GET_FIELD(rst->data, DBG_RESET_REG_ADDR);
			qed_wr(p_hwfn, p_ptt,
			       DWORDS_TO_BYTES(rst_addr) +
			       RESET_REG_UNRESET_OFFSET,
			       reg_val[reset_reg_id]);
		}
	}
}
/* Returns the attention block data of the specified block and attention
 * type, taken from the binary debug-attention-blocks array.
 */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type)
{
	const struct dbg_attn_block *attn_blocks;

	attn_blocks = (const struct dbg_attn_block *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;

	return &attn_blocks[block_id].per_type_data[attn_type];
}
/* Returns a pointer to the attention registers of the specified block and
 * attention type, and stores their count in *num_attn_regs.
 */
static const struct dbg_attn_reg *
qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type,
			u8 *num_attn_regs)
{
	const struct dbg_attn_block_type_data *type_data;
	const struct dbg_attn_reg *attn_regs;

	type_data = qed_get_block_attn_data(p_hwfn, block_id, attn_type);
	*num_attn_regs = type_data->num_regs;

	/* regs_offset indexes into the flat attention registers array */
	attn_regs = (const struct dbg_attn_reg *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr;

	return attn_regs + type_data->regs_offset;
}
/* For each physical block that is out of reset, clear all parity statuses
 * by reading the (read-to-clear) parity status registers whose mode
 * condition matches.
 */
static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_regs;
	u8 reg_idx, num_regs;
	u32 blk_idx;

	for (blk_idx = 0; blk_idx < NUM_PHYS_BLOCKS; blk_idx++) {
		if (dev_data->block_in_reset[blk_idx])
			continue;

		attn_regs = qed_get_block_attn_regs(p_hwfn,
						    (enum block_id)blk_idx,
						    ATTN_TYPE_PARITY,
						    &num_regs);

		for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
			const struct dbg_attn_reg *reg = &attn_regs[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			eval_mode = GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
			    GET_FIELD(reg->mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);

			/* Reading the status-clear register clears parity */
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_rd(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(reg->sts_clr_address));
		}
	}
}
/* Finds an image in NVRAM via the MFW GET_FILE_ATT mailbox command.
 * On success fills *nvram_offset_bytes / *nvram_size_bytes with the image
 * location and size. Fails when the MFW rejects the request or when the
 * image size is not dword-aligned.
 */
static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 image_type,
u32 *nvram_offset_bytes,
u32 *nvram_size_bytes,
bool b_can_sleep)
{
u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
struct mcp_file_att file_att;
int nvm_result;
/* Call NVRAM get file command; the attributes are returned in file_att */
nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
p_ptt,
DRV_MSG_CODE_NVM_GET_FILE_ATT,
image_type,
&ret_mcp_resp,
&ret_mcp_param,
&ret_txn_size,
(u32 *)&file_att,
b_can_sleep);
/* Check response: both the command result and the MFW response code
 * must indicate success.
 */
if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
FW_MSG_CODE_NVM_OK)
return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Update return values */
*nvram_offset_bytes = file_att.nvm_start_addr;
*nvram_size_bytes = file_att.len;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n" ,
image_type, *nvram_offset_bytes, *nvram_size_bytes);
/* Check alignment: the image must be a whole number of dwords */
if (*nvram_size_bytes & 0x3)
return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
return DBG_STATUS_OK;
}
/* Reads nvram_size_bytes from NVRAM starting at nvram_offset_bytes into
 * ret_buf, in chunks of at most MCP_DRV_NVM_BUF_LEN bytes per MFW mailbox
 * transaction. Returns DBG_STATUS_NVRAM_READ_FAILED on any mailbox or MFW
 * error.
 */
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_bytes,
u32 nvram_size_bytes,
u32 *ret_buf,
bool b_can_sleep)
{
u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
/* Signed so that the loop terminates even if the MFW returns more
 * bytes than requested in the final chunk.
 */
s32 bytes_left = nvram_size_bytes;
u32 read_offset = 0, param = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"nvram_read: reading image of size %d bytes from NVRAM\n" ,
nvram_size_bytes);
do {
/* Chunk size is capped by the MFW mailbox buffer length */
bytes_to_copy =
(bytes_left >
MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
/* Call NVRAM read command: offset and length are packed into the
 * mailbox param.
 */
SET_MFW_FIELD(param,
DRV_MB_PARAM_NVM_OFFSET,
nvram_offset_bytes + read_offset);
SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_NVM_READ_NVRAM, param,
&ret_mcp_resp,
&ret_mcp_param, &ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset),
b_can_sleep))
return DBG_STATUS_NVRAM_READ_FAILED;
/* Check response */
if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
return DBG_STATUS_NVRAM_READ_FAILED;
/* Update read offset by the size the MFW actually returned */
read_offset += ret_read_size;
bytes_left -= ret_read_size;
} while (bytes_left > 0);
return DBG_STATUS_OK;
}
/* Dumps GRC registers section header. Returns the dumped size in dwords.
 * The following parameters are dumped:
 * - count: no. of dumped entries
 * - split: split type name
 * - id: split ID (only when split_type != SPLIT_TYPE_NONE)
 * - type: register type name (only when reg_type_name != NULL)
 */
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
				 bool dump,
				 u32 num_reg_entries,
				 enum init_split_types split_type,
				 u8 split_id, const char *reg_type_name)
{
	u32 offset = 0;
	u8 num_params = 2;

	/* "count" and "split" are always dumped; "id" and "type" are
	 * conditional.
	 */
	if (split_type != SPLIT_TYPE_NONE)
		num_params++;
	if (reg_type_name)
		num_params++;

	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_regs", num_params);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "count", num_reg_entries);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "split",
				     s_split_type_defs[split_type].name);
	if (split_type != SPLIT_TYPE_NONE)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "id", split_id);
	if (reg_type_name)
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "type", reg_type_name);

	return offset;
}
/* Reads the specified registers into the specified buffer.
 * The addr and len arguments are specified in dwords.
 */
void qed_read_regs(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
{
	u32 dword;

	for (dword = 0; dword < len; dword++)
		buf[dword] = qed_rd(p_hwfn, p_ptt,
				    DWORDS_TO_BYTES(addr + dword));
}
/* Dumps the GRC registers in the specified address range.
 * Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 * Reading is attempted via DMAE first (when enabled and applicable); on
 * DMAE failure or for VF splits, falls back to dword-by-dword GRC reads
 * under the appropriate pretend context.
 */
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 addr, u32 len, bool wide_bus,
enum init_split_types split_type,
u8 split_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 port_id = 0, pf_id = 0, vf_id = 0;
bool read_using_dmae = false ;
u32 thresh;
u16 fid;
/* In size-only mode nothing is read; the range length is the size */
if (!dump)
return len;
/* Decode the split ID into port/PF/VF coordinates */
switch (split_type) {
case SPLIT_TYPE_PORT:
port_id = split_id;
break ;
case SPLIT_TYPE_PF:
pf_id = split_id;
break ;
case SPLIT_TYPE_PORT_PF:
port_id = split_id / dev_data->num_pfs_per_port;
pf_id = port_id + dev_data->num_ports *
(split_id % dev_data->num_pfs_per_port);
break ;
case SPLIT_TYPE_VF:
vf_id = split_id;
break ;
default :
break ;
}
/* Try reading using DMAE: worthwhile for long ranges, and required by
 * PROTECT_WIDE_BUS for wide-bus memories. Not applicable to VF splits.
 */
if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
(len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
(PROTECT_WIDE_BUS && wide_bus))) {
struct qed_dmae_params dmae_params;
/* Set DMAE params */
memset(&dmae_params, 0, sizeof (dmae_params));
SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
switch (split_type) {
case SPLIT_TYPE_PORT:
SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
1);
dmae_params.port_id = port_id;
break ;
case SPLIT_TYPE_PF:
SET_FIELD(dmae_params.flags,
QED_DMAE_PARAMS_SRC_PF_VALID, 1);
dmae_params.src_pfid = pf_id;
break ;
case SPLIT_TYPE_PORT_PF:
SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
1);
SET_FIELD(dmae_params.flags,
QED_DMAE_PARAMS_SRC_PF_VALID, 1);
dmae_params.port_id = port_id;
dmae_params.src_pfid = pf_id;
break ;
default :
break ;
}
/* Execute DMAE command */
read_using_dmae = !qed_dmae_grc2host(p_hwfn,
p_ptt,
DWORDS_TO_BYTES(addr),
(u64)(uintptr_t)(dump_buf),
len, &dmae_params);
/* On the first DMAE failure, disable DMAE for the rest of the
 * dump and fall back to GRC reads.
 */
if (!read_using_dmae) {
dev_data->use_dmae = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Failed reading from chip using DMAE, using GRC instead\n" );
}
}
if (read_using_dmae)
goto print_log;
/* If not read using DMAE, read using GRC */
/* Set pretend context, but only when it differs from the cached one
 * (pretend writes are expensive).
 */
if (split_type != dev_data->pretend.split_type ||
split_id != dev_data->pretend.split_id) {
switch (split_type) {
case SPLIT_TYPE_PORT:
qed_port_pretend(p_hwfn, p_ptt, port_id);
break ;
case SPLIT_TYPE_PF:
fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
pf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break ;
case SPLIT_TYPE_PORT_PF:
fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
pf_id);
qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
break ;
case SPLIT_TYPE_VF:
fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
| FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
vf_id);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break ;
default :
break ;
}
/* Cache the active pretend context */
dev_data->pretend.split_type = (u8)split_type;
dev_data->pretend.split_id = split_id;
}
/* Read registers using GRC */
qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
print_log:
/* Print a progress log line each time the read count crosses a
 * threshold multiple.
 */
dev_data->num_regs_read += len;
thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
if ((dev_data->num_regs_read / thresh) >
((dev_data->num_regs_read - len) / thresh))
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumped %d registers...\n" , dev_data->num_regs_read);
return len;
}
/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */
static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
				      bool dump, u32 addr, u32 len)
{
	/* The header is one dword: address in the low bits, length above */
	if (dump)
		*dump_buf = (len << REG_DUMP_LEN_SHIFT) | addr;

	return 1;
}
/* Dumps a GRC registers sequence: a one-dword header followed by the
 * register values. Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump, u32 addr, u32 len, bool wide_bus,
				  enum init_split_types split_type, u8 split_id)
{
	u32 offset;

	offset = qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump, addr, len, wide_bus,
					  split_type, split_id);

	return offset;
}
/* Dumps GRC registers sequence with skip cycle.
 * Returns the dumped size in dwords.
 * - addr: start GRC address in dwords
 * - total_len: total no. of dwords to dump
 * - read_len: no. consecutive dwords to read
 * - skip_len: no. of dwords to skip (and fill with zeros)
 */
static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 *dump_buf,
				       bool dump,
				       u32 addr,
				       u32 total_len,
				       u32 read_len, u32 skip_len)
{
	u32 offset = 0, reg_offset = 0;

	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);

	if (!dump)
		return offset + total_len;

	while (reg_offset < total_len) {
		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);

		/* Read a chunk of consecutive registers */
		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  dump, addr, curr_len, false,
						  SPLIT_TYPE_NONE, 0);
		reg_offset += curr_len;
		addr += curr_len;

		/* Zero-fill the skip cycle. Clamp the skip to the number of
		 * dwords actually remaining in the entry (clamping against
		 * "total_len - skip_len" would let reg_offset run past
		 * total_len and memset beyond the entry's dwords whenever
		 * skip_len exceeds what is left to dump).
		 */
		if (reg_offset < total_len) {
			curr_len = min_t(u32, skip_len,
					 total_len - reg_offset);
			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
			offset += curr_len;
			reg_offset += curr_len;
			addr += curr_len;
		}
	}

	return offset;
}
/* Dumps GRC registers entries from a serialized input array of conditional
 * headers, each followed by its register descriptors. Entries whose mode
 * condition fails or whose block is disabled are skipped. Returns the
 * dumped size in dwords and stores the entry count in
 * *num_dumped_reg_entries.
 */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct virt_mem_desc input_regs_arr,
u32 *dump_buf,
bool dump,
enum init_split_types split_type,
u8 split_id,
bool block_enable[MAX_BLOCK_ID],
u32 *num_dumped_reg_entries)
{
u32 i, offset = 0, input_offset = 0;
/* Without an eval-mode header the previous match result carries over;
 * initialized to true so unconditional entries are always dumped.
 */
bool mode_match = true ;
*num_dumped_reg_entries = 0;
/* Walk the serialized input array, one cond_hdr + payload at a time.
 * NOTE(review): input_offset indexes cond-header-sized units here but
 * dbg_dump_reg-sized units in the inner loop; both structs appear to
 * occupy one dword each -- confirm against the HSI definitions.
 */
while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
const struct dbg_dump_cond_hdr *cond_hdr =
(const struct dbg_dump_cond_hdr *)
input_regs_arr.ptr + input_offset++;
u16 modes_buf_offset;
bool eval_mode;
/* Check mode/block */
eval_mode = GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
/* Skip the whole payload when the entry is filtered out */
if (!mode_match || !block_enable[cond_hdr->block_id]) {
input_offset += cond_hdr->data_size;
continue ;
}
/* Dump each register descriptor in the payload */
for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
input_regs_arr.ptr + input_offset;
u32 addr, len;
bool wide_bus;
addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
addr,
len,
wide_bus,
split_type, split_id);
(*num_dumped_reg_entries)++;
}
}
return offset;
}
/* Dumps the register entries of one split instance, preceded by a registers
 * section header. Returns the dumped size in dwords, or 0 when no entries
 * were dumped (in which case the header is not counted either).
 */
static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct virt_mem_desc input_regs_arr,
				   u32 *dump_buf,
				   bool dump,
				   bool block_enable[MAX_BLOCK_ID],
				   enum init_split_types split_type,
				   u8 split_id, const char *reg_type_name)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum init_split_types hdr_split_type = split_type;
	u32 num_entries, offset;
	u8 hdr_split_id = split_id;

	/* PORT_PF splits are reported in the header as a plain port split */
	if (split_type == SPLIT_TYPE_PORT_PF) {
		hdr_split_type = SPLIT_TYPE_PORT;
		hdr_split_id = split_id / dev_data->num_pfs_per_port;
	}

	/* Reserve room for the header (entry count not yet known) */
	offset = qed_grc_dump_regs_hdr(dump_buf,
				       false,
				       0, hdr_split_type,
				       hdr_split_id, reg_type_name);

	/* Dump registers */
	offset += qed_grc_dump_regs_entries(p_hwfn,
					    p_ptt,
					    input_regs_arr,
					    dump_buf + offset,
					    dump,
					    split_type,
					    split_id,
					    block_enable, &num_entries);

	/* Backfill the header now that the entry count is known */
	if (dump && num_entries > 0)
		qed_grc_dump_regs_hdr(dump_buf,
				      dump,
				      num_entries,
				      hdr_split_type,
				      hdr_split_id, reg_type_name);

	return num_entries > 0 ? offset : 0;
}
/* Dumps registers according to the binary dump-registers input array, which
 * is a sequence of split headers, each followed by its register data. For
 * each split header, the contained entries are dumped once per split
 * instance (port/PF/VF). Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
const char *reg_type_name)
{
struct virt_mem_desc *dbg_buf =
&p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0, input_offset = 0;
/* Walk the serialized input array one split header at a time */
while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
const struct dbg_dump_split_hdr *split_hdr;
struct virt_mem_desc curr_input_regs_arr;
enum init_split_types split_type;
u16 split_count = 0;
u32 split_data_size;
u8 split_id;
split_hdr =
(const struct dbg_dump_split_hdr *)
dbg_buf->ptr + input_offset++;
split_type =
GET_FIELD(split_hdr->hdr,
DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
split_data_size = GET_FIELD(split_hdr->hdr,
DBG_DUMP_SPLIT_HDR_DATA_SIZE);
/* The split's register data immediately follows its header */
curr_input_regs_arr.ptr =
(u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
input_offset;
curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
/* Number of instances this split's data is dumped for */
switch (split_type) {
case SPLIT_TYPE_NONE:
split_count = 1;
break ;
case SPLIT_TYPE_PORT:
split_count = dev_data->num_ports;
break ;
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
split_count = dev_data->num_ports *
dev_data->num_pfs_per_port;
break ;
case SPLIT_TYPE_VF:
split_count = dev_data->num_vfs;
break ;
default :
/* Unknown split type: the input is corrupt, abort */
return 0;
}
for (split_id = 0; split_id < split_count; split_id++)
offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
curr_input_regs_arr,
dump_buf + offset,
dump, block_enable,
split_type,
split_id,
reg_type_name);
input_offset += split_data_size;
}
/* Cancel pretends (pretend to original PF) and reset the cached
 * pretend context, since splits may have left another FID active.
 */
if (dump) {
qed_fid_pretend(p_hwfn, p_ptt,
FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
p_hwfn->rel_pf_id));
dev_data->pretend.split_type = SPLIT_TYPE_NONE;
dev_data->pretend.split_id = 0;
}
return offset;
}
/* Dump reset registers. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
u32 offset = 0, num_regs = 0;
u8 reset_reg_id;
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
false ,
0, SPLIT_TYPE_NONE, 0, "RESET_REGS" );
/* Write reset registers */
for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
reset_reg_id++) {
const struct dbg_reset_reg *reset_reg;
u32 reset_reg_addr;
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=92 H=95 G=93
¤ Dauer der Verarbeitung: 0.41 Sekunden
(vorverarbeitet)
¤
*© Formatika GbR, Deutschland