/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* Only VFs that finished init or are active get the message */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Best effort: a send failure for one VF must not stop the
		 * broadcast to the remaining VFs, so the return value is
		 * deliberately ignored.
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		/* VF negotiated advanced link speeds: report Mbps directly */
		pfe->event_data.link_event_adv.link_status = link_up;
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		/* Fall back to the legacy virtchnl link-speed enumeration */
		pfe->event_data.link_event.link_status = link_up;
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}
/** * ice_vc_notify_vf_link_state - Inform a VF of link status * @vf: pointer to the VF structure * * send a link status message to a single VF
*/ void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{ struct virtchnl_pf_event pfe = { 0 }; struct ice_hw *hw = &vf->pf->hw;
/** * ice_vc_notify_link_state - Inform all VFs on a PF of link status * @pf: pointer to the PF structure
*/ void ice_vc_notify_link_state(struct ice_pf *pf)
{ struct ice_vf *vf; unsignedint bkt;
/** * ice_vc_notify_reset - Send pending reset message to all VFs * @pf: pointer to the PF structure * * indicate a pending reset to all VFs on a given PF
*/ void ice_vc_notify_reset(struct ice_pf *pf)
{ struct virtchnl_pf_event pfe;
/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 *
 * Return: 0 on success, -EIO if the mailbox send failed.
 */
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev = ice_pf_to_dev(pf);
	int aq_ret;

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	/* An ENOSYS from the mailbox is not treated as a failure */
	if (!aq_ret || pf->hw.mailboxq.sq_last_status == LIBIE_AQ_RC_ENOSYS)
		return 0;

	dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
		 vf->vf_id, aq_ret,
		 libie_aq_str(pf->hw.mailboxq.sq_last_status));
	return -EIO;
}
/** * ice_vc_get_ver_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to request the API version used by the PF
*/ staticint ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
{ struct virtchnl_version_info info = {
VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
};
vf->vf_ver = *(struct virtchnl_version_info *)msg; /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ if (VF_IS_V10(&vf->vf_ver))
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
/** * ice_vc_get_max_frame_size - get max frame size allowed for VF * @vf: VF used to determine max frame size * * Max frame size is determined based on the current port's max frame size and * whether a port VLAN is configured on this VF. The VF is not aware whether * it's in a port VLAN so the PF needs to account for this in max frame size * checks and sending the max frame size to the VF.
*/ static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
{ struct ice_port_info *pi = ice_vf_get_port_info(vf);
u16 max_frame_size;
if (ice_vf_is_port_vlan_ena(vf))
max_frame_size -= VLAN_HLEN;
return max_frame_size;
}
/**
 * ice_vc_get_vlan_caps
 * @hw: pointer to the hw
 * @vf: pointer to the VF info
 * @vsi: pointer to the VSI
 * @driver_caps: current driver caps
 *
 * Return 0 if there is no VLAN caps supported, or VLAN caps value
 */
static u32
ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
		     u32 driver_caps)
{
	/* In switchdev setting VLAN from VF isn't supported */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	/* VLAN offloads based on current device configuration */
	if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
		return VIRTCHNL_VF_OFFLOAD_VLAN_V2;

	if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
		bool dvm = ice_is_dvm_ena(hw);
		bool port_vlan = ice_vf_is_port_vlan_ena(vf);

		/* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
		 * these two conditions, which amounts to guest VLAN filtering
		 * and offloads being based on the inner VLAN or the
		 * inner/single VLAN respectively and don't allow VF to
		 * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
		 */
		if (dvm && port_vlan)
			return VIRTCHNL_VF_OFFLOAD_VLAN;

		if (!dvm && !port_vlan) {
			/* configure backward compatible support for VFs that
			 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
			 * configured in SVM, and no port VLAN is configured
			 */
			ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
			return VIRTCHNL_VF_OFFLOAD_VLAN;
		}

		/* configure software offloaded VLAN support when DVM
		 * is enabled, but no port VLAN is enabled
		 */
		if (dvm)
			ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
	}

	return 0;
}
/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 *
 * NOTE(review): this body appears truncated relative to upstream (no
 * capability negotiation beyond L2 and no vsi_res population is visible
 * here) — confirm against the full driver source.
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = virtchnl_struct_size(vfres, vsi_res, 0);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	/* V1.1+ VFs pass their capability request in the message body;
	 * older VFs get the legacy L2 + VLAN defaults.
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
/**
 * ice_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 */
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
	/* Only a VF that completed init may request a reset */
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states))
		return;

	ice_reset_vf(vf, 0);
}
/**
 * ice_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI ID
 *
 * check for the valid VSI ID
 */
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
	/* a VF only ever sees the single relative VSI ID */
	return vsi_id == ICE_VF_VSI_ID;
}
/** * ice_vc_isvalid_q_id * @vsi: VSI to check queue ID against * @qid: VSI relative queue ID * * check for the valid queue ID
*/ staticbool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
{ /* allocated Tx and Rx queues should be always equal for VF VSI */ return qid < vsi->alloc_txq;
}
/** * ice_vc_isvalid_ring_len * @ring_len: length of ring * * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE * or zero
*/ staticbool ice_vc_isvalid_ring_len(u16 ring_len)
{ return ring_len == 0 ||
(ring_len >= ICE_MIN_NUM_DESC &&
ring_len <= ICE_MAX_NUM_DESC &&
!(ring_len % ICE_REQ_DESC_MULTIPLE));
}
/** * ice_vc_validate_pattern * @vf: pointer to the VF info * @proto: virtchnl protocol headers * * validate the pattern is supported or not. * * Return: true on success, false on error.
*/ bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
{ bool is_ipv4 = false; bool is_ipv6 = false; bool is_udp = false;
u16 ptype = -1; int i = 0;
while (i < proto->count &&
proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) { switch (proto->proto_hdr[i].type) { case VIRTCHNL_PROTO_HDR_ETH:
ptype = ICE_PTYPE_MAC_PAY; break; case VIRTCHNL_PROTO_HDR_IPV4:
ptype = ICE_PTYPE_IPV4_PAY;
is_ipv4 = true; break; case VIRTCHNL_PROTO_HDR_IPV6:
ptype = ICE_PTYPE_IPV6_PAY;
is_ipv6 = true; break; case VIRTCHNL_PROTO_HDR_UDP: if (is_ipv4)
ptype = ICE_PTYPE_IPV4_UDP_PAY; elseif (is_ipv6)
ptype = ICE_PTYPE_IPV6_UDP_PAY;
is_udp = true; break; case VIRTCHNL_PROTO_HDR_TCP: if (is_ipv4)
ptype = ICE_PTYPE_IPV4_TCP_PAY; elseif (is_ipv6)
ptype = ICE_PTYPE_IPV6_TCP_PAY; break; case VIRTCHNL_PROTO_HDR_SCTP: if (is_ipv4)
ptype = ICE_PTYPE_IPV4_SCTP_PAY; elseif (is_ipv6)
ptype = ICE_PTYPE_IPV6_SCTP_PAY; break; case VIRTCHNL_PROTO_HDR_GTPU_IP: case VIRTCHNL_PROTO_HDR_GTPU_EH: if (is_ipv4)
ptype = ICE_MAC_IPV4_GTPU; elseif (is_ipv6)
ptype = ICE_MAC_IPV6_GTPU; goto out; case VIRTCHNL_PROTO_HDR_L2TPV3: if (is_ipv4)
ptype = ICE_MAC_IPV4_L2TPV3; elseif (is_ipv6)
ptype = ICE_MAC_IPV6_L2TPV3; goto out; case VIRTCHNL_PROTO_HDR_ESP: if (is_ipv4)
ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
ICE_MAC_IPV4_ESP; elseif (is_ipv6)
ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
ICE_MAC_IPV6_ESP; goto out; case VIRTCHNL_PROTO_HDR_AH: if (is_ipv4)
ptype = ICE_MAC_IPV4_AH; elseif (is_ipv6)
ptype = ICE_MAC_IPV6_AH; goto out; case VIRTCHNL_PROTO_HDR_PFCP: if (is_ipv4)
ptype = ICE_MAC_IPV4_PFCP_SESSION; elseif (is_ipv6)
ptype = ICE_MAC_IPV6_PFCP_SESSION; goto out; default: break;
}
i++;
}
/** * ice_vc_parse_rss_cfg - parses hash fields and headers from * a specific virtchnl RSS cfg * @hw: pointer to the hardware * @rss_cfg: pointer to the virtchnl RSS cfg * @hash_cfg: pointer to the HW hash configuration * * Return true if all the protocol header and hash fields in the RSS cfg could * be parsed, else return false * * This function parses the virtchnl RSS cfg to be the intended * hash fields and the intended header for RSS configuration
*/ staticbool ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, struct ice_rss_hash_cfg *hash_cfg)
{ conststruct ice_vc_hash_field_match_type *hf_list; conststruct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len;
u32 *addl_hdrs = &hash_cfg->addl_hdrs;
u64 *hash_flds = &hash_cfg->hash_flds;
/* set outer layer RSS as default */
hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { struct virtchnl_proto_hdr *proto_hdr =
&rss_cfg->proto_hdrs.proto_hdr[i]; bool hdr_found = false; int j;
/* Find matched ice headers according to virtchnl headers. */ for (j = 0; j < hdr_list_len; j++) { struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
/** * ice_vc_handle_rss_cfg * @vf: pointer to the VF info * @msg: pointer to the message buffer * @add: add a RSS config if true, otherwise delete a RSS config * * This function adds/deletes a RSS config
*/ staticint ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
{
u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG; struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct device *dev = ice_pf_to_dev(vf->pf); struct ice_hw *hw = &vf->pf->hw; struct ice_vsi *vsi;
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; goto error_param;
}
if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
vsi = ice_get_vf_vsi(vf); if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { struct ice_vsi_ctx *ctx;
u8 lut_type, hash_type; int status;
if (add) { if (ice_add_rss_cfg(hw, vsi, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
vsi->vsi_num, v_ret);
}
} else { int status;
status = ice_rem_rss_cfg(hw, vsi->idx, &cfg); /* We just ignore -ENOENT, because if two configurations * share the same profile remove one of them actually * removes both, since the profile is deleted.
*/ if (status && status != -ENOENT) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
vf->vf_id, status);
}
}
}
/* Store the UP2TC configuration from DCB to a user priority bitmap * of each TC. Each element of prio_of_tc represents one TC. Each * bitmap indicates the user priorities belong to this TC.
*/ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
tc = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i];
tc_prio[tc] |= BIT(i);
}
/**
 * ice_vf_cfg_qs_bw - Configure per queue bandwidth
 * @vf: pointer to the VF info
 * @num_queues: number of queues to be configured
 *
 * Configure per queue bandwidth.
 *
 * Return: 0 on success or negative error value.
 */
static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
{
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_vsi *vsi;
	int ret;
	u16 i;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return -EINVAL;

	for (i = 0; i < num_queues; i++) {
		u32 peak = vf->qs_bw[i].peak;
		u32 committed = vf->qs_bw[i].committed;
		u16 q_id = vf->qs_bw[i].queue_id;
		u8 tc = vf->qs_bw[i].tc;

		/* a non-zero peak rate sets a max limit; zero restores the
		 * scheduler default
		 */
		if (peak)
			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
					       q_id, ICE_MAX_BW, peak);
		else
			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx,
						    tc, q_id, ICE_MAX_BW);
		if (ret)
			return ret;

		/* likewise for the committed (minimum) rate */
		if (committed)
			ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
					       q_id, ICE_MIN_BW, committed);
		else
			ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx,
						    tc, q_id, ICE_MIN_BW);
		if (ret)
			return ret;
	}

	return 0;
}
/** * ice_vf_cfg_q_quanta_profile - Configure quanta profile * @vf: pointer to the VF info * @quanta_prof_idx: pointer to the quanta profile index * @quanta_size: quanta size to be set * * This function chooses available quanta profile and configures the register. * The quanta profile is evenly divided by the number of device ports, and then * available to the specific PF and VFs. The first profile for each PF is a * reserved default profile. Only quanta size of the rest unused profile can be * modified. * * Return: 0 on success or negative error value.
*/ staticint ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
u16 *quanta_prof_idx)
{ const u16 n_desc = calc_quanta_desc(quanta_size); struct ice_hw *hw = &vf->pf->hw; const u16 n_cmd = 2 * n_desc; struct ice_pf *pf = vf->pf;
u16 per_pf, begin_id;
u8 n_used;
u32 reg;
/** * ice_vc_cfg_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to configure VF VSIs promiscuous mode
*/ staticint ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; bool rm_promisc, alluni = false, allmulti = false; struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg; struct ice_vsi_vlan_ops *vlan_ops; int mcast_err = 0, ucast_err = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi;
u8 mcast_m, ucast_m; struct device *dev; int ret = 0;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
vsi = ice_get_vf_vsi(vf); if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
dev = ice_pf_to_dev(pf); if (!ice_is_vf_trusted(vf)) {
dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id); /* Leave v_ret alone, lie to the VF on purpose. */ goto error_param;
}
if (info->flags & FLAG_VF_UNICAST_PROMISC)
alluni = true;
if (info->flags & FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
rm_promisc = !allmulti && !alluni;
vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); if (rm_promisc)
ret = vlan_ops->ena_rx_filtering(vsi); else
ret = vlan_ops->dis_rx_filtering(vsi); if (ret) {
dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { if (alluni) { /* in this case we're turning on promiscuous mode */
ret = ice_set_dflt_vsi(vsi);
} else { /* in this case we're turning off promiscuous mode */ if (ice_is_dflt_vsi_in_use(vsi->port_info))
ret = ice_clear_dflt_vsi(vsi);
}
/* in this case we're turning on/off only * allmulticast
*/ if (allmulti)
mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); else
mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
/** * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL * @vsi: VSI of the VF to configure * @q_idx: VF queue index used to determine the queue in the PF's space
*/ void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{ struct ice_hw *hw = &vsi->back->hw;
u32 pfq = vsi->txq_map[q_idx];
u32 reg;
reg = rd32(hw, QINT_TQCTL(pfq));
/* MSI-X index 0 in the VF's space is always for the OICR, which means * this is most likely a poll mode VF driver, so don't enable an * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
*/ if (!(reg & QINT_TQCTL_MSIX_INDX_M)) return;
/** * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL * @vsi: VSI of the VF to configure * @q_idx: VF queue index used to determine the queue in the PF's space
*/ void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
{ struct ice_hw *hw = &vsi->back->hw;
u32 pfq = vsi->rxq_map[q_idx];
u32 reg;
reg = rd32(hw, QINT_RQCTL(pfq));
/* MSI-X index 0 in the VF's space is always for the OICR, which means * this is most likely a poll mode VF driver, so don't enable an * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
*/ if (!(reg & QINT_RQCTL_MSIX_INDX_M)) return;
/** * ice_vc_ena_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to enable all or specific queue(s)
*/ staticint ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg; struct ice_vsi *vsi; unsignedlong q_map;
u16 vf_q_id;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
if (!ice_vc_validate_vqs_bitmaps(vqs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
vsi = ice_get_vf_vsi(vf); if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
/* Enable only Rx rings, Tx rings were enabled by the FW when the * Tx queue group list was configured and the context bits were * programmed using ice_vsi_cfg_txqs
*/
q_map = vqs->rx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
/* Skip queue if enabled */ if (test_bit(vf_q_id, vf->rxq_ena)) continue;
if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
/* Set flag to indicate that queues are enabled */ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param: /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
NULL, 0);
}
/**
 * ice_vf_vsi_dis_single_txq - disable a single Tx queue
 * @vf: VF to disable queue for
 * @vsi: VSI for the VF
 * @q_id: VF relative (0-based) queue ID
 *
 * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
 * disabled then clear q_id bit in the enabled queues bitmap and return
 * success. Otherwise return error.
 */
int
ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
{
	struct ice_txq_meta txq_meta = { 0 };
	struct ice_tx_ring *ring;
	int err;

	/* Stopping an already-disabled queue is harmless; just log it */
	if (!test_bit(q_id, vf->txq_ena))
		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
			q_id, vsi->vsi_num);

	ring = vsi->tx_rings[q_id];
	if (!ring)
		return -EINVAL;

	ice_fill_txq_meta(vsi, ring, &txq_meta);

	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring,
				   &txq_meta);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
			q_id, vsi->vsi_num);
		return err;
	}

	/* Clear enabled queues flag */
	clear_bit(q_id, vf->txq_ena);

	return 0;
}
/** * ice_vc_dis_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * * called from the VF to disable all or specific queue(s)
*/ staticint ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg; struct ice_vsi *vsi; unsignedlong q_map;
u16 vf_q_id;
q_map = vqs->rx_queues; /* speed up Rx queue disable by batching them if possible */ if (q_map &&
bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) { if (ice_vsi_stop_all_rx_rings(vsi)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
/* Skip queue if not enabled */ if (!test_bit(vf_q_id, vf->rxq_ena)) continue;
if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id, true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param;
}
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->rxq_ena);
}
}
/* Clear enabled queues flag */ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param: /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
NULL, 0);
}
/** * ice_cfg_interrupt * @vf: pointer to the VF info * @vsi: the VSI being configured * @map: vector map for mapping vectors to queues * @q_vector: structure for interrupt vector * configure the IRQ to queue map
*/ staticenum virtchnl_status_code
ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, struct virtchnl_vector_map *map, struct ice_q_vector *q_vector)
{
u16 vsi_q_id, vsi_q_id_idx; unsignedlong qmap;
/**
 * ice_vc_cfg_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the IRQ to queue map
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	u16 num_q_vectors_mapped, vsi_id, vector_id;
	struct virtchnl_irq_map_info *irqmap_info;
	struct virtchnl_vector_map *map;
	struct ice_vsi *vsi;
	int i;

	/* Parse the VF message first; previously irqmap_info and
	 * num_q_vectors_mapped were read without ever being assigned.
	 */
	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	/* Check to make sure number of VF vectors mapped is not greater than
	 * number of VF vectors originally allocated, and check that
	 * there is actually at least a single VF queue vector mapped
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    vf->num_msix < num_q_vectors_mapped ||
	    !num_q_vectors_mapped) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];
		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* vector_id is always 0-based for each VF, and can never be
		 * larger than or equal to the max allowed interrupts per VF
		 */
		if (!(vector_id < vf->num_msix) ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* No need to map VF miscellaneous or rogue vector */
		if (!vector_id)
			continue;

		/* Subtract non queue vector from vector_id passed by VF
		 * to get actual number of VSI queue vector array index
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
		if (v_ret)
			goto error_param;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}
/**
 * ice_vc_cfg_q_bw - Configure per queue bandwidth
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer which holds the command descriptor
 *
 * Configure VF queues bandwidth.
 *
 * Return: 0 on success or negative error value.
 */
static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queues_bw_cfg *qbw =
		(struct virtchnl_queues_bw_cfg *)msg;
	struct ice_vsi *vsi;
	u16 i;

	/* Reject inactive VFs and invalid VSI IDs up front, consistent with
	 * the validation done by the other virtchnl handlers in this file.
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
	    qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
		dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	/* Validate every entry before caching anything */
	for (i = 0; i < qbw->num_queues; i++) {
		/* a per-queue peak rate must not exceed the VF-wide cap */
		if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
		    qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
			dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
				 qbw->cfg[i].queue_id, vf->vf_id,
				 vf->max_tx_rate);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto err;
		}
		/* a per-queue committed rate must not undercut the VF floor */
		if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
		    qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
			dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
				 qbw->cfg[i].queue_id, vf->vf_id,
				 vf->min_tx_rate);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto err;
		}
		if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
				 vf->vf_id);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto err;
		}
		if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
			dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
				 vf->vf_id);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto err;
		}
	}

	/* Cache the requested per-queue shaper settings on the VF */
	for (i = 0; i < qbw->num_queues; i++) {
		vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
		vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
		vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
		vf->qs_bw[i].tc = qbw->cfg[i].tc;
	}

	if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

err:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
				     v_ret, NULL, 0);
}
/** * ice_vc_cfg_q_quanta - Configure per queue quanta * @vf: pointer to the VF info * @msg: pointer to the msg buffer which holds the command descriptor * * Configure VF queues quanta. * * Return: 0 on success or negative error value.
*/ staticint ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
{
u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_quanta_cfg *qquanta =
(struct virtchnl_quanta_cfg *)msg; struct ice_vsi *vsi; int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) goto error_param;
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) goto error_param;
vsi = ice_get_vf_vsi(vf); if (!vsi) goto error_param;
if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); goto error_param;
}
for (i = 0; i < qci->num_queue_pairs; i++) { if (!qci->qpair[i].rxq.crc_disable) continue;
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
vf->vlan_strip_ena) goto error_param;
}
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i]; if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
qpi->txq.headwb_enabled ||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
!ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) { goto error_param;
}
q_idx = qpi->rxq.queue_id;
/* make sure selected "q_idx" is in valid range of queues * for selected "vsi"
*/ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { goto error_param;
}
/* copy Tx queue info from VF into VSI */ if (qpi->txq.ring_len > 0) {
vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
/* Disable any existing queue first */ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) goto error_param;
/* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
vf->vf_id, q_idx); goto error_param;
}
}
/* copy Rx queue info from VF into VSI */ if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf); struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
u32 rxdid;
ring->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is * not expected to account for it in the MTU * calculation
*/ if (ice_vf_is_port_vlan_ena(vf))
ring->max_frame += VLAN_HLEN;
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
vf->vf_id, q_idx); goto error_param;
}
/* If Rx flex desc is supported, select RXDID for Rx * queues. Otherwise, use legacy 32byte descriptor * format. Legacy 16byte descriptor is not supported. * If this RXDID is selected, return error.
*/ if (vf->driver_caps &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
rxdid = qpi->rxq.rxdid; if (!(BIT(rxdid) & pf->supported_rxdids)) goto error_param;
} else {
rxdid = ICE_RXDID_LEGACY_1;
}
/* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
error_param: /* disable whatever we can */ for (; i >= 0; i--) { if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
vf->vf_id, i); if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
vf->vf_id, i);
}
/* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
}
/** * ice_can_vf_change_mac * @vf: pointer to the VF info * * Return true if the VF is allowed to change its MAC filters, false otherwise
*/ staticbool ice_can_vf_change_mac(struct ice_vf *vf)
{ /* If the VF MAC address has been set administratively (via the * ndo_set_vf_mac command), then deny permission to the VF to * add/delete unicast MAC addresses, unless the VF is trusted
*/ if (vf->pf_set_mac && !ice_is_vf_trusted(vf)) returnfalse;
returntrue;
}
/**
 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
 * @vc_ether_addr: used to extract the type
 */
static u8
ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
{
	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
}
/** * ice_is_vc_addr_legacy - check if the MAC address is from an older VF * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
*/ staticbool
ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
{
u8 type = ice_vc_ether_addr_type(vc_ether_addr);
return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
}
/** * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC * @vc_ether_addr: VIRTCHNL structure that contains MAC and type * * This function should only be called when the MAC address in * virtchnl_ether_addr is a valid unicast MAC
*/ staticbool
ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
{
u8 type = ice_vc_ether_addr_type(vc_ether_addr);
return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
}
/**
 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
 */
static void
ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr))
		return;

	/* only allow legacy VF drivers to set the device and hardware MAC if it
	 * is zero and allow new VF drivers to set the hardware MAC if the type
	 * was correctly specified over VIRTCHNL
	 */
	if (ice_is_vc_addr_primary(vc_ether_addr) ||
	    (ice_is_vc_addr_legacy(vc_ether_addr) &&
	     is_zero_ether_addr(vf->hw_lan_addr))) {
		ether_addr_copy(vf->dev_lan_addr, mac_addr);
		ether_addr_copy(vf->hw_lan_addr, mac_addr);
	}

	/* hardware and device MACs are already set, but its possible that the
	 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
	 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
	 * away for the legacy VF driver case as it will be updated in the
	 * delete flow for this case
	 */
	if (ice_is_vc_addr_legacy(vc_ether_addr)) {
		ether_addr_copy(vf->legacy_last_added_umac.addr, mac_addr);
		vf->legacy_last_added_umac.time_modified = jiffies;
	}
}
/** * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address * @mac: address to check * * Return: true if the address is one of the three possible LLDP multicast * addresses, false otherwise.
*/ staticbool ice_is_mc_lldp_eth_addr(const u8 *mac)
{ const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base))) returnfalse;
/** * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC * @vf: a VF to add the address to * @mac: address to check * * Return: true if the VF is allowed to add such MAC address, false otherwise.
*/ staticbool ice_vc_can_add_mac(conststruct ice_vf *vf, const u8 *mac)
{ struct device *dev = ice_pf_to_dev(vf->pf);
if (is_unicast_ether_addr(mac) &&
!ice_can_vf_change_mac((struct ice_vf *)vf)) {
dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); returnfalse;
}
if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
dev_warn(dev, "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.28 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.