/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	/* Fix: dump the raw phy_type_high value too -- the original printed
	 * only the low half's header while still decoding high's bits.
	 */
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */ staticint ice_set_mac_type(struct ice_hw *hw)
{ if (hw->vendor_id != PCI_VENDOR_ID_INTEL) return -ENODEV;
/* Map the PCI device ID onto one of the ice MAC families */
switch (hw->device_id) { case ICE_DEV_ID_E810C_BACKPLANE: case ICE_DEV_ID_E810C_QSFP: case ICE_DEV_ID_E810C_SFP: case ICE_DEV_ID_E810_XXV_BACKPLANE: case ICE_DEV_ID_E810_XXV_QSFP: case ICE_DEV_ID_E810_XXV_SFP:
hw->mac_type = ICE_MAC_E810; break; case ICE_DEV_ID_E823C_10G_BASE_T: case ICE_DEV_ID_E823C_BACKPLANE: case ICE_DEV_ID_E823C_QSFP: case ICE_DEV_ID_E823C_SFP: case ICE_DEV_ID_E823C_SGMII: case ICE_DEV_ID_E822C_10G_BASE_T: case ICE_DEV_ID_E822C_BACKPLANE: case ICE_DEV_ID_E822C_QSFP: case ICE_DEV_ID_E822C_SFP: case ICE_DEV_ID_E822C_SGMII: case ICE_DEV_ID_E822L_10G_BASE_T: case ICE_DEV_ID_E822L_BACKPLANE: case ICE_DEV_ID_E822L_SFP: case ICE_DEV_ID_E822L_SGMII: case ICE_DEV_ID_E823L_10G_BASE_T: case ICE_DEV_ID_E823L_1GBE: case ICE_DEV_ID_E823L_BACKPLANE: case ICE_DEV_ID_E823L_QSFP: case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC; break; case ICE_DEV_ID_E825C_BACKPLANE: case ICE_DEV_ID_E825C_QSFP: case ICE_DEV_ID_E825C_SFP: case ICE_DEV_ID_E825C_SGMII:
hw->mac_type = ICE_MAC_GENERIC_3K_E825; break; case ICE_DEV_ID_E830CC_BACKPLANE: case ICE_DEV_ID_E830CC_QSFP56: case ICE_DEV_ID_E830CC_SFP: case ICE_DEV_ID_E830CC_SFP_DD: case ICE_DEV_ID_E830C_BACKPLANE: case ICE_DEV_ID_E830_XXV_BACKPLANE: case ICE_DEV_ID_E830C_QSFP: case ICE_DEV_ID_E830_XXV_QSFP: case ICE_DEV_ID_E830C_SFP: case ICE_DEV_ID_E830_XXV_SFP: case ICE_DEV_ID_E835CC_BACKPLANE: case ICE_DEV_ID_E835CC_QSFP56: case ICE_DEV_ID_E835CC_SFP: case ICE_DEV_ID_E835C_BACKPLANE: case ICE_DEV_ID_E835C_QSFP: case ICE_DEV_ID_E835C_SFP: case ICE_DEV_ID_E835_L_BACKPLANE: case ICE_DEV_ID_E835_L_QSFP: case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830; break; default:
/* Unrecognized device ID: mark the MAC type unknown */
hw->mac_type = ICE_MAC_UNKNOWN; break;
}
/* NOTE(review): this span looks truncated -- the function's trailing
 * debug print, return statement and closing brace are not visible here;
 * confirm against the complete source file.
 */
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */ staticint
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd)
{ struct ice_aqc_manage_mac_read_resp *resp; struct ice_aqc_manage_mac_read *cmd; struct libie_aq_desc desc; int status;
u16 flags;
u8 i;
/* NOTE(review): the AQ descriptor setup, ice_aq_send_cmd() call and the
 * assignments to cmd/resp/flags appear to be missing from this span --
 * flags is read below before being written. Confirm against the full
 * source before relying on this fragment.
 */
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); return -EIO;
}
/* A single port can report up to two (LAN and WoL) addresses */ for (i = 0; i < cmd->num_addr; i++) if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
ether_addr_copy(hw->port_info->mac.lan_addr,
resp[i].mac_addr);
ether_addr_copy(hw->port_info->mac.perm_addr,
resp[i].mac_addr); break;
}
return 0;
}
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd)
{ struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps); struct libie_aq_desc desc; constchar *prefix; struct ice_hw *hw; int status;
/* NOTE(review): the remainder of this function (descriptor setup, AQ
 * send, result caching and closing brace) is not visible in this span;
 * only the local declarations survive. Confirm against the full source.
 */
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */ staticint
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, struct ice_sq_cd *cd)
{ struct ice_aqc_get_link_topo *cmd; struct libie_aq_desc desc;
/* NOTE(review): this span appears spliced -- `hw` below is never
 * declared, and the node_handle/node_part_number/resp code looks like it
 * belongs to a different function (netlist node query). Confirm against
 * the full source.
 */
if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) return -EINTR;
if (node_handle)
*node_handle = le16_to_cpu(resp->addr.handle); if (node_part_number)
*node_part_number = resp->node_part_num;
return 0;
}
/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type: type of netlist node to look for
 * @ctx: context of the search
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Return:
 * * 0 if the node is found,
 * * -ENOENT if no handle was found,
 * * negative error code on failure to access the AQ.
 */ staticint ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
u8 node_part_number, u16 *node_handle)
{
u8 idx;
/* Walk every netlist index until a matching part number is seen */
for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) { struct ice_aqc_get_link_topo cmd = {};
u8 rec_node_part_number; int status;
/* NOTE(review): the lines populating cmd.addr.topo_params with
 * node_type/ctx/idx appear to be missing from this span -- as shown,
 * every iteration queries a zeroed descriptor. Confirm against the
 * full source.
 */
status = ice_aq_get_netlist_node(hw, &cmd,
&rec_node_part_number,
node_handle); if (status) return status;
if (rec_node_part_number == node_part_number) return 0;
}
return -ENOENT;
}
/** * ice_is_media_cage_present * @pi: port information structure * * Returns true if media cage is present, else false. If no cage, then * media type is backplane or BASE-T.
*/ staticbool ice_is_media_cage_present(struct ice_port_info *pi)
{ /* Node type cage can be used to determine if cage is present. If AQC * returns error (ENOENT), then no cage present. If no cage present then * connection type is backplane or BASE-T.
*/ return !ice_aq_get_link_topo_handle(pi,
ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
NULL);
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 *
 * Return: the media type (fiber, BASE-T, DA, backplane) derived from the
 * currently reported PHY type, or ICE_MEDIA_UNKNOWN when it cannot be
 * determined unambiguously.
 */ staticenum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{ struct ice_link_status *hw_link_info;
if (!pi) return ICE_MEDIA_UNKNOWN;
hw_link_info = &pi->phy.link_info; if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) /* If more than one media type is selected, report unknown */ return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) { /* 1G SGMII is a special case where some DA cable PHYs * may show this as an option when it really shouldn't * be since SGMII is meant to be between a MAC and a PHY * in a backplane. Try to detect this case and handle it
 */ if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
(hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) return ICE_MEDIA_DA;
/* Classify every known low PHY type: optical -> FIBER, twisted pair ->
 * BASET, copper attach -> DA, KR/KX -> BACKPLANE. AUI/LAUI/CAUI types
 * are DA only when a media cage is fitted, otherwise backplane.
 */
switch (hw_link_info->phy_type_low) { case ICE_PHY_TYPE_LOW_1000BASE_SX: case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: case ICE_PHY_TYPE_LOW_50GBASE_SR2: case ICE_PHY_TYPE_LOW_50GBASE_LR2: case ICE_PHY_TYPE_LOW_50GBASE_SR: case ICE_PHY_TYPE_LOW_50GBASE_FR: case ICE_PHY_TYPE_LOW_50GBASE_LR: case ICE_PHY_TYPE_LOW_100GBASE_SR4: case ICE_PHY_TYPE_LOW_100GBASE_LR4: case ICE_PHY_TYPE_LOW_100GBASE_SR2: case ICE_PHY_TYPE_LOW_100GBASE_DR: case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_1000BASE_T: case ICE_PHY_TYPE_LOW_2500BASE_T: case ICE_PHY_TYPE_LOW_5GBASE_T: case ICE_PHY_TYPE_LOW_10GBASE_T: case ICE_PHY_TYPE_LOW_25GBASE_T: return ICE_MEDIA_BASET; case ICE_PHY_TYPE_LOW_10G_SFI_DA: case ICE_PHY_TYPE_LOW_25GBASE_CR: case ICE_PHY_TYPE_LOW_25GBASE_CR_S: case ICE_PHY_TYPE_LOW_25GBASE_CR1: case ICE_PHY_TYPE_LOW_40GBASE_CR4: case ICE_PHY_TYPE_LOW_50GBASE_CR2: case ICE_PHY_TYPE_LOW_50GBASE_CP: case ICE_PHY_TYPE_LOW_100GBASE_CR4: case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_CP2: return ICE_MEDIA_DA; case ICE_PHY_TYPE_LOW_25G_AUI_C2C: case ICE_PHY_TYPE_LOW_40G_XLAUI: case ICE_PHY_TYPE_LOW_50G_LAUI2: case ICE_PHY_TYPE_LOW_50G_AUI2: case ICE_PHY_TYPE_LOW_50G_AUI1: case ICE_PHY_TYPE_LOW_100G_AUI4: case ICE_PHY_TYPE_LOW_100G_CAUI4: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_DA;
fallthrough; case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_5GBASE_KR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: case ICE_PHY_TYPE_LOW_40GBASE_KR4: case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: case ICE_PHY_TYPE_LOW_50GBASE_KR2: case ICE_PHY_TYPE_LOW_100GBASE_KR4: case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: return ICE_MEDIA_BACKPLANE;
}
/* Same classification for the high PHY type word */
} else { switch (hw_link_info->phy_type_high) { case ICE_PHY_TYPE_HIGH_100G_AUI2: case ICE_PHY_TYPE_HIGH_100G_CAUI2: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_DA;
fallthrough; case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: return ICE_MEDIA_FIBER;
}
} return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_get_link_status_datalen - data length for Get Link Status command
 * @hw: pointer to the HW struct
 *
 * Returns datalength for the Get Link Status AQ command, which is bigger
 * for newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	if (hw->mac_type == ICE_MAC_E830)
		return ICE_AQC_LS_DATA_SIZE_V2;

	/* E810 and every other family use the original (V1) layout */
	return ICE_AQC_LS_DATA_SIZE_V1;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */ int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd)
{ struct ice_aqc_get_link_status_data link_data = { 0 }; struct ice_aqc_get_link_status *resp; struct ice_link_status *li_old, *li; enum ice_media_type *hw_media_type; struct ice_fc_info *hw_fc_info; struct libie_aq_desc desc; bool tx_pause, rx_pause; struct ice_hw *hw;
u16 cmd_flags; int status;
if (!pi) return -EINVAL;
hw = pi->hw;
li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
li = &pi->phy.link_info;
hw_fc_info = &pi->fc;
/* NOTE(review): the descriptor setup, ice_aq_send_cmd() call and the
 * parsing of link_data into *li appear to be missing from this span --
 * resp/cmd_flags/tx_pause/rx_pause are declared but never used, and
 * *link is copied from a li that is never refreshed here. Confirm
 * against the full source.
 */
/* save link status information */ if (link)
*link = *li;
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
return 0;
}
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor.
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, struct ice_aqc_set_mac_cfg *cmd)
{
	u32 reg_val, thresh_mask;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type != ICE_MAC_E830) {
		/* E800 family: read the transmit timer first... */
		reg_val = rd32(hw,
			       E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(reg_val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* ...then the FC refresh threshold */
		reg_val = rd32(hw,
			       E800_REFRESH_TMR(E800_IDX_OF_LFC));
		thresh_mask = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	} else {
		/* E830: class 0 pause quanta holds the transmit timer */
		reg_val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(reg_val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* FC threshold comes from the class 0/1 threshold register */
		reg_val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		thresh_mask = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	}

	cmd->fc_refresh_threshold = le16_encode_bits(reg_val, thresh_mask);
}
/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */ int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{ struct ice_aqc_set_mac_cfg *cmd; struct libie_aq_desc desc;
/* NOTE(review): the body of this function (descriptor setup, the
 * ice_fill_tx_timer_and_fc_thresh() call, AQ send and closing brace) is
 * not visible in this span. Confirm against the full source.
 */
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
			      rd32(hw, GL_PWR_MODE_CTL));

	switch (agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		/* 50G and above share the coarser granularity class */
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_wait_for_fw - wait for full FW readiness
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out
 *
 * Return: 0 on success, -ETIMEDOUT on timeout.
 */ staticint ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
{ int fw_loading;
u32 elapsed = 0;
/* NOTE(review): this span appears badly spliced -- everything from here
 * down reads like the body of ice_init_hw() (scheduler/PHY/filter/MAC
 * initialization with goto-unwind labels), not ice_wait_for_fw(). The
 * variables status/pcaps/mac_buf are never declared in this fragment and
 * the err_unroll_* labels are not visible. Confirm against the full
 * source before trusting any of it.
 */
/* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw); if (status) {
ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n"); goto err_unroll_alloc;
}
ice_sched_get_psm_clk_freq(hw);
/* Initialize port_info struct with scheduler data */
status = ice_sched_init_port(hw->port_info); if (status) goto err_unroll_sched;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) {
status = -ENOMEM; goto err_unroll_sched;
}
/* Initialize port_info struct with PHY capabilities */
status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
NULL); if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
status);
/* Initialize port_info struct with link information */
status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); if (status) goto err_unroll_sched;
/* need a valid SW entry point to build a Tx tree */ if (!hw->sw_entry_point_layer) {
ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
status = -EIO; goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list); /* Initialize max burst size */ if (!hw->max_burst_size)
ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
status = ice_init_fltr_mgmt_struct(hw); if (status) goto err_unroll_sched;
/* Get MAC information */ /* A single port can report up to two (LAN and WoL) addresses */
mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
GFP_KERNEL); if (!mac_buf) {
status = -ENOMEM; goto err_unroll_fltr_mgmt_struct;
}
if (status) goto err_unroll_fltr_mgmt_struct; /* enable jumbo frame support at MAC level */
status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); if (status) goto err_unroll_fltr_mgmt_struct; /* Obtain counter base index which would be used by flow director */
status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base); if (status) goto err_unroll_fltr_mgmt_struct;
status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct;
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
/* Some cards require longer initialization times * due to necessity of loading FW from an external source. * This can take even half a minute.
 */ if (ice_is_pf_c827(hw)) {
status = ice_wait_for_fw(hw, 30000); if (status) {
dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out"); goto err_unroll_fltr_mgmt_struct;
}
}
/* NOTE(review): the success return and the err_unroll_* unwind labels
 * that this fragment jumps to are not visible here; the span ends before
 * the function closes.
 */
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	/* Release the flow director counter block */
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);

	ice_cleanup_fltr_mgmt_struct(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * Return: 0 when the device is active and all reset-done bits are set,
 * -EIO if either polling loop times out.
 */ int ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. * Add 1sec for outstanding AQ commands that can take a long time.
 */
grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
rd32(hw, GLGEN_RSTCTL)) + 10;
for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
reg = rd32(hw, GLGEN_RSTAT); if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) break;
}
if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return -EIO;
}
/* NOTE(review): uld_mask is read below but never assigned in this span
 * -- the code that builds the GLNVM_ULD done-bit mask appears to be
 * missing. Confirm against the full source.
 */
/* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, GLNVM_ULD) & uld_mask; if (reg == uld_mask) {
ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); break;
}
mdelay(10);
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg); return -EIO;
}
return 0;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset.
 *
 * Return: 0 on success, -EIO if a reset fails to complete in time.
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);
	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	/* Bug fix: the timeout test must use the loop's actual upper bound.
	 * Comparing against ICE_PF_RESET_WAIT_COUNT alone never detects a
	 * genuine timeout (cnt equals the sum then) and would misreport a
	 * reset that completed on exactly that iteration.
	 */
	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 trig_val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		/* PF reset has its own dedicated flow */
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		trig_val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		trig_val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	/* Merge the request bit with whatever is already latched */
	trig_val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, trig_val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
/**
 * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
 * @hw: pointer to the hardware structure
 * @rxq_ctx: pointer to the packed Rx queue context
 * @rxq_index: the index of the Rx queue
 */
static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, const ice_rxq_ctx_buf_t *rxq_ctx,
				   u32 rxq_index)
{
	const u32 *src = (const u32 *)rxq_ctx;

	/* Write the packed context to HW one dword at a time */
	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, src++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index), *src);
		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *src);
	}
}
/**
 * ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers
 * @hw: pointer to the hardware structure
 * @rxq_ctx: pointer to the packed Rx queue context
 * @rxq_index: the index of the Rx queue
 */
static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw,
				     ice_rxq_ctx_buf_t *rxq_ctx,
				     u32 rxq_index)
{
	u32 *dst = (u32 *)rxq_ctx;

	/* Read the packed context back one dword at a time */
	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		dst[i] = rd32(hw, QRX_CONTEXT(i, rxq_index));
		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, dst[i]);
	}
}
/**
 * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
 * @ctx: the Rx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout.
 */
static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
			     ice_rxq_ctx_buf_t *buf)
{
	/* Field descriptors drive the generic bit-packing helper */
	pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
 * ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer
 * @buf: the HW buffer to unpack from
 * @ctx: the Rx queue context to unpack
 *
 * Unpack the Rx queue context from the HW buffer into the CPU-friendly
 * structure.
 */
static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf,
			       struct ice_rlan_ctx *ctx)
{
	/* Reverse of ice_pack_rxq_ctx(), using the same field table */
	unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
		      QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
 * ice_write_rxq_ctx - Write Rx Queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the unpacked Rx queue context
 * @rxq_index: the index of the Rx queue
 *
 * Pack the sparse Rx Queue context into dense hardware format and write it
 * into the HW register space.
 *
 * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */ int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
ice_rxq_ctx_buf_t buf = {};
if (rxq_index > QRX_CTRL_MAX_INDEX) return -EINVAL;
/* NOTE(review): the pack + copy-to-HW calls and the closing brace are
 * not visible in this span; the function appears truncated here.
 */
/**
 * ice_read_rxq_ctx - Read Rx queue context from HW
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the Rx queue context
 * @rxq_index: the index of the Rx queue
 *
 * Read the Rx queue context from the hardware registers, and unpack it into
 * the sparse Rx queue context structure.
 *
 * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
 */ int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
ice_rxq_ctx_buf_t buf = {};
if (rxq_index > QRX_CTRL_MAX_INDEX) return -EINVAL;
/* NOTE(review): the copy-from-HW + unpack calls and the closing brace
 * are not visible in this span; the function appears truncated here.
 */
/**
 * ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer
 * @ctx: the Tx queue context to pack
 * @buf: the Admin Queue HW buffer to pack into
 *
 * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed Admin Queue layout.
 */
void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
{
	/* Field descriptors drive the generic bit-packing helper */
	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
 * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
 * @ctx: the Tx queue context to pack
 * @buf: the HW buffer to pack into
 *
 * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
 * bit-packed HW layout, including the internal data portion.
 */
static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
				  ice_txq_ctx_buf_full_t *buf)
{
	/* Full-size buffer variant: same field table, larger destination */
	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
 * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
 * @buf: the HW buffer to unpack from
 * @ctx: the Tx queue context to unpack
 *
 * Unpack the Tx queue context from the HW buffer (including the full internal
 * state) into the CPU-friendly structure.
 */
static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
				    struct ice_tlan_ctx *ctx)
{
	/* Reverse of ice_pack_txq_ctx_full() */
	unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
		      QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}
/**
 * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
 * @hw: pointer to the hardware structure
 * @txq_ctx: pointer to the packed Tx queue context, including internal state
 * @txq_index: the index of the Tx queue
 *
 * Copy Tx Queue context from HW register space to dense structure
 */ staticvoid ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
ice_txq_ctx_buf_full_t *txq_ctx,
u32 txq_index)
{ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 *ctx = (u32 *)txq_ctx;
u32 txq_base, reg;
/* Get Tx queue base within card space */
txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
/* NOTE(review): the register-read loop that fills *ctx (and uses pf/reg)
 * plus the closing brace are not visible in this span; the function
 * appears truncated here.
 */
/**
 * ice_read_txq_ctx - Read Tx queue context from HW
 * @hw: pointer to the hardware structure
 * @tlan_ctx: pointer to the Tx queue context
 * @txq_index: the index of the Tx queue
 *
 * Read the Tx queue context from the HW registers, then unpack it into the
 * ice_tlan_ctx structure for use.
 *
 * Return: 0 on success, or -EINVAL on an invalid Tx queue index.
 */ int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
u32 txq_index)
{
ice_txq_ctx_buf_full_t buf = {};
if (txq_index > QTX_COMM_HEAD_MAX_INDEX) return -EINVAL;
/* NOTE(review): the copy-from-HW + unpack calls and the closing brace
 * are not visible in this span; the function appears truncated here.
 */
/**
 * ice_write_txq_ctx - Write Tx queue context to HW
 * @hw: pointer to the hardware structure
 * @tlan_ctx: pointer to the Tx queue context
 * @txq_index: the index of the Tx queue
 *
 * Pack the Tx queue context into the dense HW layout, then write it into the
 * HW registers.
 *
 * Return: 0 on success, or -EINVAL on an invalid Tx queue index.
 */ int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
u32 txq_index)
{
ice_txq_ctx_buf_full_t buf = {};
if (txq_index > QTX_COMM_HEAD_MAX_INDEX) return -EINVAL;
/* NOTE(review): everything from here down (in/msg/msg_len) belongs to a
 * different function -- it reads like a sideband message helper spliced
 * into this span. ice_write_txq_ctx's pack/copy body and closing brace
 * are not visible. Confirm against the full source.
 */
if (in->opcode)
msg.data = cpu_to_le32(in->data); else /* data read comes back in completion, so shorten the struct by * sizeof(msg.data)
 */
msg_len -= sizeof(msg.data);
/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software, to prevent most (but not all)
 * types of AQ commands from being sent to FW while a package download is in
 * progress.
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);
/** * ice_should_retry_sq_send_cmd * @opcode: AQ opcode * * Decide if we should retry the send command routine for the ATQ, depending * on the opcode.
*/ staticbool ice_should_retry_sq_send_cmd(u16 opcode)
{ switch (opcode) { case ice_aqc_opc_get_link_topo: case ice_aqc_opc_lldp_stop: case ice_aqc_opc_lldp_start: case ice_aqc_opc_lldp_filter_ctrl: returntrue;
}
returnfalse;
}
/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */ staticint
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct libie_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd)
{ struct libie_aq_desc desc_cpy; bool is_cmd_for_retry;
u8 idx = 0;
u16 opcode; int status;
/* NOTE(review): the lines extracting opcode from desc and initializing
 * is_cmd_for_retry (via ice_should_retry_sq_send_cmd()) appear to be
 * missing from this span -- is_cmd_for_retry is read below before being
 * written. Confirm against the full source.
 */
if (is_cmd_for_retry) { /* All retryable cmds are direct, without buf. */
WARN_ON(buf);
memcpy(&desc_cpy, desc, sizeof(desc_cpy));
}
do {
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
if (!is_cmd_for_retry || !status ||
hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY) break;
/* Restore the pristine descriptor before retrying */
memcpy(desc, &desc_cpy, sizeof(desc_cpy));
msleep(ICE_SQ_SEND_DELAY_TIME_MS);
} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */ int
ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{ struct libie_aqc_req_res *cmd = libie_aq_raw(desc); bool lock_acquired = false; int status;
/* When a package download is in process (i.e. when the firmware's
 * Global Configuration Lock resource is held), only the Download
 * Package, Get Version, Get Package Info List, Upload Section,
 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
 * Recipes to Profile Association, and Release Resource (with resource
 * ID set to Global Config Lock) AdminQ commands are allowed; all others
 * must block until the package download completes and the Global Config
 * Lock is released. See also ice_acquire_global_cfg_lock().
 */ switch (le16_to_cpu(desc->opcode)) { case ice_aqc_opc_download_pkg: case ice_aqc_opc_get_pkg_info_list: case ice_aqc_opc_get_ver: case ice_aqc_opc_upload_section: case ice_aqc_opc_update_pkg: case ice_aqc_opc_set_port_params: case ice_aqc_opc_get_vlan_mode_parameters: case ice_aqc_opc_set_vlan_mode_parameters: case ice_aqc_opc_set_tx_topo: case ice_aqc_opc_get_tx_topo: case ice_aqc_opc_add_recipe: case ice_aqc_opc_recipe_to_profile: case ice_aqc_opc_get_recipe: case ice_aqc_opc_get_recipe_to_profile: break; case ice_aqc_opc_release_res: if (le16_to_cpu(cmd->res_id) == LIBIE_AQC_RES_ID_GLBL_LOCK) break;
fallthrough; default:
/* Any other opcode must wait for the SW global config lock */
mutex_lock(&ice_global_cfg_lock_sw);
lock_acquired = true; break;
}
status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); if (lock_acquired)
mutex_unlock(&ice_global_cfg_lock_sw);
return status;
}
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */ int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{ struct libie_aqc_get_ver *resp; struct libie_aq_desc desc; int status;
/* NOTE(review): the function body (descriptor setup, AQ send, caching of
 * the version fields) and closing brace are not visible in this span.
 */
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd)
{ struct libie_aqc_driver_ver *cmd; struct libie_aq_desc desc;
u16 len;
/* NOTE(review): the function body (descriptor setup, AQ send) and
 * closing brace are not visible in this span.
 */
/**
 * ice_aq_q_shutdown - notify firmware of AdminQ shutdown (0x0003)
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */ int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{ struct ice_aqc_q_shutdown *cmd; struct libie_aq_desc desc;
/* NOTE(review): the rest of this function body is not visible in this
 * view -- verify against the complete file.
 */
/**
 * ice_aq_req_res - request ownership of a common resource (0x0008)
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0         - acquired lock, and can perform download package
 * 2) -EIO      - did not get lock, driver should fail to load
 * 3) -EALREADY - did not get lock, but another driver has
 *                successfully downloaded the package; the driver does
 *                not have to download the package and can continue
 *                loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */ staticint
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, struct ice_sq_cd *cd)
{ struct libie_aqc_req_res *cmd_resp; struct libie_aq_desc desc; int status;
/* NOTE(review): the initialization of cmd_resp/desc and the AdminQ send
 * that sets status are not visible in this view -- verify against the
 * full file before reasoning about this function in isolation.
 */
/* The completion specifies the maximum time in ms that the driver
 * may hold the resource in the Timeout field.
 */
/* Global config lock response utilizes an additional status field.
 *
 * If the Global config lock resource is held by some other driver, the
 * command completes with LIBIE_AQ_RES_GLBL_IN_PROG in the status field
 * and the timeout field indicates the maximum time the current owner
 * of the resource has to free it.
 */ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { if (le16_to_cpu(cmd_resp->status) == LIBIE_AQ_RES_GLBL_SUCCESS) {
/* lock acquired; caller may hold it for *timeout ms */
*timeout = le32_to_cpu(cmd_resp->timeout); return 0;
} elseif (le16_to_cpu(cmd_resp->status) ==
LIBIE_AQ_RES_GLBL_IN_PROG) {
/* another driver holds the lock; report its remaining hold time */
*timeout = le32_to_cpu(cmd_resp->timeout); return -EIO;
} elseif (le16_to_cpu(cmd_resp->status) ==
LIBIE_AQ_RES_GLBL_DONE) { return -EALREADY;
}
/* invalid FW response, force a timeout immediately */
*timeout = 0; return -EIO;
}
/* If the resource is held by some other driver, the command completes
 * with a busy return value and the timeout field indicates the maximum
 * time the current owner of the resource has to free it.
 */ if (!status || hw->adminq.sq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
}
/**
 * ice_aq_release_res - release a common resource (0x0009)
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */ staticint
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, struct ice_sq_cd *cd)
{ struct libie_aqc_req_res *cmd; struct libie_aq_desc desc;
/* NOTE(review): the rest of this function body is not visible in this
 * view -- verify against the complete file.
 */
/**
 * ice_acquire_res - acquire ownership of an AdminQ resource
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource. If the
 * resource is currently held by another owner, poll periodically until the
 * owner frees it or its ownership window expires.
 *
 * Return: 0 when the resource was acquired, -EALREADY when another driver
 * already performed the work guarded by the resource (nothing left to do),
 * or another negative error code on failure.
 */
int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		    enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
	u32 wait_budget;
	u32 owner_time_left = timeout;
	int err;

	err = ice_aq_req_res(hw, res, access, 0, &owner_time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary
	 * updates; in this case the caller does not obtain the resource
	 * and has no further work to do.
	 */
	if (err == -EALREADY)
		goto ice_acquire_res_exit;

	if (err)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	wait_budget = owner_time_left;
	while (err && wait_budget && owner_time_left) {
		mdelay(ICE_RES_POLLING_DELAY_MS);
		if (wait_budget > ICE_RES_POLLING_DELAY_MS)
			wait_budget -= ICE_RES_POLLING_DELAY_MS;
		else
			wait_budget = 0;

		err = ice_aq_req_res(hw, res, access, 0, &owner_time_left, NULL);

		/* either the lock was acquired, or it is free but another
		 * driver already did the work -- stop polling in both cases
		 */
		if (!err || err == -EALREADY)
			break;
	}
	if (err && err != -EALREADY)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (err == -EALREADY) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
	}
	return err;
}
/**
 * ice_release_res - release a common resource
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	unsigned long deadline = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* There are some rare cases when trying to release the resource
	 * results in an admin queue timeout (-EIO); retry with a short
	 * sleep until it succeeds, fails differently, or the retry window
	 * expires.
	 */
	do {
		int err = ice_aq_release_res(hw, res, 0, NULL);

		if (err != -EIO)
			break;
		usleep_range(1000, 2000);
	} while (time_before(jiffies, deadline));
}
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 *
 * Helper function to allocate/free resources using the admin queue commands
 */ int ice_aq_alloc_free_res(struct ice_hw *hw, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc)
{ struct ice_aqc_alloc_free_res_cmd *cmd; struct libie_aq_desc desc;
cmd = libie_aq_raw(&desc);
/* reject a missing buffer or one too small to hold even one element */
if (!buf || buf_size < flex_array_size(buf, elem, 1)) return -EINVAL;
/* NOTE(review): the rest of this function body (filling the descriptor
 * and sending the command) is not visible in this view -- verify against
 * the complete file.
 */
/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */ int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{ struct ice_aqc_alloc_free_res_elem *buf;
u16 buf_len; int status;
/* NOTE(review): the rest of this function body is not visible in this
 * view -- verify against the complete file.
 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.