/* context for devlink info version reporting
 *
 * buf:             scratch buffer each version getter formats its string into
 * pending_orom:    inactive (pending) Option ROM version info, if any
 * pending_nvm:     inactive (pending) NVM version info, if any
 * pending_netlist: inactive (pending) netlist version info, if any
 * dev_caps:        device capabilities discovered when the request was made
 */
struct ice_info_ctx {
	char buf[128];
	struct ice_orom_info pending_orom;
	struct ice_nvm_info pending_nvm;
	struct ice_netlist_info pending_netlist;
	struct ice_hw_dev_caps dev_caps;
};
/* The following functions are used to format specific strings for various * devlink info versions. The ctx parameter is used to provide the storage * buffer, as well as any ancillary information calculated when the info * request was made. * * If a version does not exist, for example when attempting to get the * inactive version of flash when there is no pending update, the function * should leave the buffer in the ctx structure empty.
*/
status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) /* We failed to locate the PBA, so just skip this entry */
dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
status);
}
/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler. The stored entry will first try the pending handler, and fallback
 * to the active handler if the pending function does not report a version.
 * The pending handler should check the status of a pending update for the
 * relevant flash component. It should only fill in the buffer in the case
 * where a valid pending version is available. This ensures that the related
 * stored and running versions remain in sync, and that stored versions are
 * correctly reported as expected.
 */
#define combined(key, active, pending) \
	running(key, active), \
	stored(key, pending, active)
/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_info_ctx *ctx;
	size_t i;
	int err;

	/* The AdminQ is unusable while a reset is in progress; wait for it */
	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
		return err;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* discover capabilities first */
	err = ice_discover_dev_caps(hw, &ctx->dev_caps);
	if (err) {
		dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
		goto out_free_ctx;
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
		err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Option ROM */
			ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
		err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
		if (err) {
			dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending NVM */
			ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
		err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
				err, libie_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Netlist */
			ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
		}
	}

	/* formats the device serial number into ctx->buf */
	ice_info_get_dsn(pf, ctx);

	err = devlink_info_serial_number_put(req, ctx->buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		goto out_free_ctx;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		memset(ctx->buf, 0, sizeof(ctx->buf));

		ice_devlink_versions[i].getter(pf, ctx);

		/* If the default getter doesn't report a version, use the
		 * fallback function. This is primarily useful in the case of
		 * "stored" versions that want to report the same value as the
		 * running version in the normal case of no pending update.
		 */
		if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
			ice_devlink_versions[i].fallback(pf, ctx);

		/* Do not report missing versions */
		if (ctx->buf[0] == '\0')
			continue;

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put_ext(req, key,
								   ctx->buf,
								   DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put_ext(req, key,
								  ctx->buf,
								  DEVLINK_INFO_VERSION_TYPE_COMPONENT);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				goto out_free_ctx;
			}
			break;
		}
	}

out_free_ctx:
	kfree(ctx);
	return err;
}
/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Allow user to activate new Embedded Management Processor firmware by
 * issuing device specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happens automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 */
static int
ice_devlink_reload_empr_start(struct ice_pf *pf,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;

	/* "pending" has one bit per flash bank (main NVM, Option ROM,
	 * netlist); a zero mask means there is nothing to activate.
	 */
	if (!pending) {
		NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
		return -ECANCELED;
	}

	if (pf->fw_emp_reset_disabled) {
		NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
		return -ECANCELED;
	}

	dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

	err = ice_aq_nvm_update_empr(hw);
	if (err) {
		dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
			err, libie_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
	}

	/* err is zero when the EMP reset was triggered successfully */
	return err;
}
/**
 * ice_devlink_reinit_down - unload given PF
 * @pf: pointer to the PF struct
 */
static void ice_devlink_reinit_down(struct ice_pf *pf)
{
	/* No need to take devl_lock, it's already taken by devlink API */
	ice_unload(pf);
	/* main VSI teardown touches netdev state, so hold the RTNL lock */
	rtnl_lock();
	ice_vsi_decfg(ice_get_main_vsi(pf));
	rtnl_unlock();
	ice_deinit_dev(pf);
}
/**
 * ice_devlink_reload_down - prepare for reload
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
			enum devlink_reload_action action,
			enum devlink_reload_limit limit,
			struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		return ice_devlink_reload_empr_start(pf, extack);
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		/* Reinit is only supported from the default configuration:
		 * no switchdev, no ADQ, no VFs.
		 */
		if (ice_is_eswitch_mode_switchdev(pf)) {
			NL_SET_ERR_MSG_MOD(extack, "Go to legacy mode before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_is_adq_active(pf)) {
			NL_SET_ERR_MSG_MOD(extack, "Turn off ADQ before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_has_vfs(pf)) {
			NL_SET_ERR_MSG_MOD(extack, "Remove all VFs before doing reinit");
			return -EOPNOTSUPP;
		}
		ice_devlink_reinit_down(pf);
		return 0;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}
/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Wait for driver to finish rebuilding after EMP reset is completed. This
 * includes time to wait for both the actual device reset as well as the time
 * for the driver's rebuild to complete.
 */
static int
ice_devlink_reload_empr_finish(struct ice_pf *pf,
			       struct netlink_ext_ack *extack)
{
	int err = ice_wait_for_reset(pf, 60 * HZ);

	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");

	/* zero when the rebuild completed within the timeout */
	return err;
}
/** * ice_get_tx_topo_user_sel - Read user's choice from flash * @pf: pointer to pf structure * @layers: value read from flash will be saved here * * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV. * * Return: zero when read was successful, negative values otherwise.
*/ staticint ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers)
{ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; struct ice_hw *hw = &pf->hw; int err;
err = ice_acquire_nvm(hw, ICE_RES_READ); if (err) return err;
/** * ice_update_tx_topo_user_sel - Save user's preference in flash * @pf: pointer to pf structure * @layers: value to be saved in flash * * Variable "layers" defines user's preference about number of layers in Tx * Scheduler Topology Tree. This choice should be stored in PFA TLV field * and be picked up by driver, next time during init. * * Return: zero when save was successful, negative values otherwise.
*/ staticint ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
{ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; struct ice_hw *hw = &pf->hw; int err;
err = ice_acquire_nvm(hw, ICE_RES_WRITE); if (err) return err;
/**
 * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID to set
 * @ctx: context to store the parameter value
 *
 * Return: zero on success and negative value on failure.
 */
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);

	/* read the flash-stored preference straight into the param value */
	return ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
}
/**
 * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
 * @devlink: pointer to the devlink instance
 * @id: the parameter ID to set
 * @ctx: context to get the parameter value
 * @extack: netlink extended ACK structure
 *
 * Return: zero on success and negative value on failure.
 */
static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	int err;

	err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);

	/* on success, tell the user the change needs a slot powercycle */
	if (!err)
		NL_SET_ERR_MSG_MOD(extack, "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect.");

	return err;
}
/**
 * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
 *                                        parameter value
 * @devlink: unused pointer to devlink instance
 * @id: the parameter ID to validate
 * @val: value to validate
 * @extack: netlink extended ACK structure
 *
 * Supported values are:
 * - 5 - five layers Tx Scheduler Topology Tree
 * - 9 - nine layers Tx Scheduler Topology Tree
 *
 * Return: zero when passed parameter value is supported. Negative value on
 * error.
 */
static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
						union devlink_param_value val,
						struct netlink_ext_ack *extack)
{
	if (val.vu8 == ICE_SCHED_5_LAYERS || val.vu8 == ICE_SCHED_9_LAYERS)
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "Wrong number of tx scheduler layers provided.");
	return -EINVAL;
}
/** * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree * @pf: pf struct * * This function tears down tree exported during VF's creation.
*/ void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
{ struct devlink *devlink; struct ice_vf *vf; unsignedint bkt;
/**
 * ice_traverse_tx_tree - traverse Tx scheduler tree
 * @devlink: devlink struct
 * @node: current node, used for recursion
 * @tc_node: tc_node struct, that is treated as a root
 * @pf: pf struct
 *
 * This function traverses Tx scheduler tree and exports
 * entire structure to the devlink-rate.
 */
static void ice_traverse_tx_tree(struct devlink *devlink,
				 struct ice_sched_node *node,
				 struct ice_sched_node *tc_node,
				 struct ice_pf *pf)
{
	struct devlink_rate *rate_node = NULL;
	struct ice_dynamic_port *sf;
	struct ice_vf *vf;
	int i;

	if (node->rate_node)
		/* already added, skip to the next */
		goto traverse_children;

	if (node->parent == tc_node) {
		/* create root node */
		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
		   pf->vsi[node->vsi_handle]->vf) {
		/* NOTE(review): a vsi_handle of 0 is treated as "no VSI" by
		 * this truthiness check -- confirm handle 0 can never belong
		 * to a VF or SF VSI.
		 */
		vf = pf->vsi[node->vsi_handle]->vf;
		if (!vf->devlink_port.devlink_rate)
			/* leaf nodes doesn't have children
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&vf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
		   pf->vsi[node->vsi_handle]->sf) {
		sf = pf->vsi[node->vsi_handle]->sf;
		if (!sf->devlink_port.devlink_rate)
			/* leaf nodes doesn't have children
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&sf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
		   node->parent->rate_node) {
		rate_node = devl_rate_node_create(devlink, node, node->name,
						  node->parent->rate_node);
	}

	/* devl_rate_node_create() may return an ERR_PTR; only cache a real
	 * rate node on success
	 */
	if (rate_node && !IS_ERR(rate_node))
		node->rate_node = rate_node;

traverse_children:
	/* recurse into every child regardless of what this node was */
	for (i = 0; i < node->num_children; i++)
		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}
/** * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate * @devlink: devlink struct * @vsi: main vsi struct * * This function finds a root node, then calls ice_traverse_tx tree, which * traverses the tree and exports it's contents to devlink rate.
*/ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
{ struct ice_port_info *pi = vsi->port_info; struct ice_sched_node *tc_node; struct ice_pf *pf = vsi->back; int i;
tc_node = pi->root->children[0];
mutex_lock(&pi->sched_lock); for (i = 0; i < tc_node->num_children; i++)
ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
mutex_unlock(&pi->sched_lock);
/**
 * ice_set_object_tx_share - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MIN_BW scheduling BW limit.
 */
static int ice_set_object_tx_share(struct ice_port_info *pi,
				   struct ice_sched_node *node,
				   u64 bw, struct netlink_ext_ack *extack)
{
	int err;

	mutex_lock(&pi->sched_lock);
	/* converts bytes per second to kilo bits per second */
	node->tx_share = div_u64(bw, 125);
	err = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
	mutex_unlock(&pi->sched_lock);

	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");

	return err;
}
/**
 * ice_set_object_tx_max - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets ICE_MAX_BW scheduling BW limit.
 */
static int ice_set_object_tx_max(struct ice_port_info *pi,
				 struct ice_sched_node *node,
				 u64 bw, struct netlink_ext_ack *extack)
{
	int err;

	mutex_lock(&pi->sched_lock);
	/* converts bytes per second value to kilo bits per second */
	node->tx_max = div_u64(bw, 125);
	err = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
	mutex_unlock(&pi->sched_lock);

	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");

	return err;
}
/**
 * ice_set_object_tx_priority - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @priority: value representing priority for strict priority arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets priority of node among siblings.
 */
static int ice_set_object_tx_priority(struct ice_port_info *pi,
				      struct ice_sched_node *node,
				      u32 priority,
				      struct netlink_ext_ack *extack)
{
	int err;

	/* hardware supports strict priority values 0-7 only */
	if (priority >= 8) {
		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_priority = priority;
	err = ice_sched_set_node_priority(pi, node, node->tx_priority);
	mutex_unlock(&pi->sched_lock);

	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");

	return err;
}
/**
 * ice_set_object_tx_weight - sets node scheduling parameter
 * @pi: devlink struct instance
 * @node: node struct instance
 * @weight: value representing relative weight for WFQ arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets node weight for WFQ algorithm.
 */
static int ice_set_object_tx_weight(struct ice_port_info *pi,
				    struct ice_sched_node *node,
				    u32 weight, struct netlink_ext_ack *extack)
{
	int err;

	/* hardware accepts WFQ weights in the range 1-200 */
	if (weight < 1 || weight > 200) {
		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_weight = weight;
	err = ice_sched_set_node_weight(pi, node, node->tx_weight);
	mutex_unlock(&pi->sched_lock);

	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");

	return err;
}
/** * ice_get_pi_from_dev_rate - get port info from devlink_rate * @rate_node: devlink struct instance * * This function returns corresponding port_info struct of devlink_rate
*/ staticstruct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
{ struct ice_pf *pf = devlink_priv(rate_node->devlink);
/** * ice_devlink_reinit_up - do reinit of the given PF * @pf: pointer to the PF struct
*/ staticint ice_devlink_reinit_up(struct ice_pf *pf)
{ struct ice_vsi *vsi = ice_get_main_vsi(pf); int err;
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) return -EOPNOTSUPP;
if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); return -EOPNOTSUPP;
}
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) return -EOPNOTSUPP;
if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); return -EOPNOTSUPP;
}
/**
 * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
 * @mode: local forwarding for mode used in port_info struct.
 *
 * Return: Mode respective string or "Invalid".
 */
static const char *
ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
{
	if (mode == ICE_LOCAL_FWD_MODE_ENABLED)
		return DEVLINK_LOCAL_FWD_ENABLED_STR;
	if (mode == ICE_LOCAL_FWD_MODE_PRIORITIZED)
		return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
	if (mode == ICE_LOCAL_FWD_MODE_DISABLED)
		return DEVLINK_LOCAL_FWD_DISABLED_STR;

	return "Invalid";
}
/** * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name. * @mode_str: local forwarding mode string. * * Return: Mode value or negative number if invalid.
*/ staticint ice_devlink_local_fwd_str_to_mode(constchar *mode_str)
{ if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR)) return ICE_LOCAL_FWD_MODE_ENABLED; elseif (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR)) return ICE_LOCAL_FWD_MODE_PRIORITIZED; elseif (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR)) return ICE_LOCAL_FWD_MODE_DISABLED;
return -EINVAL;
}
/**
 * ice_devlink_local_fwd_get - Get local_fwd parameter.
 * @devlink: Pointer to the devlink instance.
 * @id: The parameter ID to set.
 * @ctx: Context to store the parameter value.
 *
 * Return: Zero.
 */
static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct ice_port_info *pi = pf->hw.port_info;

	/* report the current mode as its user-visible string */
	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
		 ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode));

	return 0;
}
/**
 * ice_devlink_local_fwd_set - Set local_fwd parameter.
 * @devlink: Pointer to the devlink instance.
 * @id: The parameter ID to set.
 * @ctx: Context to get the parameter value.
 * @extack: Netlink extended ACK structure.
 *
 * Return: Zero.
 */
static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	int new_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_port_info *pi = pf->hw.port_info;

	/* nothing to do when the requested mode is already active */
	if (pi->local_fwd_mode == new_mode)
		return 0;

	pi->local_fwd_mode = new_mode;
	dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
	/* the new mode only takes effect after a core reset */
	ice_schedule_reset(pf, ICE_RESET_CORER);

	return 0;
}
/** * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value. * @devlink: Unused pointer to devlink instance. * @id: The parameter ID to validate. * @val: Value to validate. * @extack: Netlink extended ACK structure. * * Supported values are: * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled * "prioritized" - local_fwd traffic is prioritized in scheduling. * * Return: Zero when passed parameter value is supported. Negative value on * error.
*/ staticint ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack)
{ if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported."); return -EINVAL;
}
/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is kept track of through devres by
 * adding an action to remove it when unwinding.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
	if (!devlink)
		return NULL;

	/* Add an action to teardown the devlink when unwinding the driver */
	if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
		return NULL;

	/* the PF lives in the devlink private area */
	return devlink_priv(devlink);
}
/** * ice_allocate_sf - Allocate devlink and return SF structure pointer * @dev: the device to allocate for * @pf: pointer to the PF structure * * Allocate a devlink instance for SF. * * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
*/ struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
{ struct devlink *devlink; int err;
devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
dev); if (!devlink) return ERR_PTR(-ENOMEM);
/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for.
 *
 * Register the devlink instance associated with this physical function.
 */
void ice_devlink_register(struct ice_pf *pf)
{
	devl_register(priv_to_devlink(pf));
}
/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devl_unregister(devlink);
}
int ice_devlink_register_params(struct ice_pf *pf)
{ struct devlink *devlink = priv_to_devlink(pf); union devlink_param_value value; struct ice_hw *hw = &pf->hw; int status;
status = devl_params_register(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params)); if (status) return status;
status = devl_params_register(devlink, ice_dvl_msix_params,
ARRAY_SIZE(ice_dvl_msix_params)); if (status) goto unregister_rdma_params;
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params)); if (status) goto unregister_msix_params;
/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
 * the nvm-flash or shadow-ram region.
 *
 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
 * interface.
 *
 * @returns zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u8 *nvm_data, *tmp, i;
	u32 nvm_size, left;
	s8 num_blks;
	int status;

	/* NOTE(review): read_shadow_ram and nvm_size are used below but are
	 * never assigned in this view. The block that selects them based on
	 * which region (@ops: nvm-flash vs shadow-ram) is being snapshotted
	 * appears to be missing -- restore it from the full source before
	 * building; as written this reads uninitialized stack memory.
	 */
	/* NOTE(review): i is u8 and num_blks is s8 -- verify num_blks can
	 * never exceed 127 for the largest supported flash size.
	 */
	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;

	num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
	tmp = nvm_data;
	left = nvm_size;

	/* Some systems take longer to read the NVM than others which causes the
	 * FW to reclaim the NVM lock before the entire NVM has been read. Fix
	 * this by breaking the reads of the NVM into smaller chunks that will
	 * probably not take as long. This has some overhead since we are
	 * increasing the number of AQ commands, but it should always work
	 */
	for (i = 0; i < num_blks; i++) {
		u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);

		/* re-acquire the NVM semaphore for every chunk so firmware
		 * cannot reclaim it mid-snapshot
		 */
		status = ice_acquire_nvm(hw, ICE_RES_READ);
		if (status) {
			dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
				status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
			vfree(nvm_data);
			return -EIO;
		}

		status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
					   &read_sz, tmp, read_shadow_ram);
		if (status) {
			dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
				read_sz, status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
			ice_release_nvm(hw);
			vfree(nvm_data);
			return -EIO;
		}

		ice_release_nvm(hw);

		tmp += read_sz;
		left -= read_sz;
	}

	/* ownership of nvm_data passes to the devlink core on success */
	*data = nvm_data;

	return 0;
}
/**
 * ice_devlink_nvm_read - Read a portion of NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @offset: the offset to start at
 * @size: the amount to read
 * @data: the data buffer to read into
 *
 * This function is called in response to DEVLINK_CMD_REGION_READ to directly
 * read a section of the NVM contents.
 *
 * It reads from either the nvm-flash or shadow-ram region contents.
 *
 * @returns zero on success, and updates the data pointer. Returns a non-zero
 * error code on failure.
 */
static int ice_devlink_nvm_read(struct devlink *devlink,
				const struct devlink_region_ops *ops,
				struct netlink_ext_ack *extack,
				u64 offset, u32 size, u8 *data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u64 nvm_size;
	int status;

	/* NOTE(review): read_shadow_ram and nvm_size are used below but are
	 * never assigned in this view. The region-selection block (nvm-flash
	 * vs shadow-ram, keyed off @ops) appears to have been lost -- restore
	 * it from the full source before building.
	 */
	/* NOTE(review): '>=' also rejects a read ending exactly at nvm_size;
	 * confirm whether that boundary is intentional.
	 */
	if (offset + size >= nvm_size) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
		return -ERANGE;
	}

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
				   read_shadow_ram);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		return -EIO;
	}
	ice_release_nvm(hw);

	return 0;
}
/** * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities * @devlink: the devlink instance * @ops: the devlink region being snapshotted * @extack: extended ACK response structure * @data: on exit points to snapshot data buffer * * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for * the device-caps devlink region. It captures a snapshot of the device * capabilities reported by firmware. * * @returns zero on success, and updates the data pointer. Returns a non-zero * error code on failure.
*/ staticint
ice_devlink_devcaps_snapshot(struct devlink *devlink, conststruct devlink_region_ops *ops, struct netlink_ext_ack *extack, u8 **data)
{ struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; void *devcaps; int status;
devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); if (!devcaps) return -ENOMEM;
status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
ice_aqc_opc_list_dev_caps, NULL); if (status) {
dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
vfree(devcaps); return status;
}
/** * ice_devlink_init_regions - Initialize devlink regions * @pf: the PF device structure * * Create devlink regions used to enable access to dump the contents of the * flash memory on the device.
*/ void ice_devlink_init_regions(struct ice_pf *pf)
{ struct devlink *devlink = priv_to_devlink(pf); struct device *dev = ice_pf_to_dev(pf);
u64 nvm_size, sram_size;
/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	/* only regions that were successfully created are destroyed */
	struct devlink_region *regions[] = {
		pf->nvm_region,
		pf->sram_region,
		pf->devcaps_region,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(regions); i++)
		if (regions[i])
			devl_region_destroy(regions[i]);
}
/*
 * NOTE(review): the text below is extraction residue from a web page (a
 * German site disclaimer), not part of the driver source. Preserved here,
 * commented out, with an English translation:
 *
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */