if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return 0; /* nothing to see here, move along */
status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (status)
dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
op, iavf_stat_str(hw, status),
libie_aq_str(hw->aq.asq_last_status)); return iavf_status_to_errno(status);
}
/** * iavf_send_api_ver * @adapter: adapter structure * * Send API version admin queue message to the PF. The reply is not checked * in this function. Returns 0 if the message was successfully * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
**/ int iavf_send_api_ver(struct iavf_adapter *adapter)
{ struct virtchnl_version_info vvi;
/** * iavf_poll_virtchnl_msg * @hw: HW configuration structure * @event: event to populate on success * @op_to_poll: requested virtchnl op to poll for * * Initialize poll for virtchnl msg matching the requested_op. Returns 0 * if a message of the correct opcode is in the queue or an error code * if no message matching the op code is waiting and other failures.
*/ staticint
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event, enum virtchnl_ops op_to_poll)
{ enum virtchnl_ops received_op; enum iavf_status status;
u32 v_retval;
while (1) { /* When the AQ is empty, iavf_clean_arq_element will return * nonzero and this loop will terminate.
*/
status = iavf_clean_arq_element(hw, event, NULL); if (status != IAVF_SUCCESS) return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING) continue;
dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
iavf_schedule_reset(adapter,
IAVF_FLAG_RESET_PENDING);
/** * iavf_verify_api_ver * @adapter: adapter structure * * Compare API versions with the PF. Must be called after admin queue is * initialized. Returns 0 if API versions match, -EIO if they do not, * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors * from the firmware are propagated.
**/ int iavf_verify_api_ver(struct iavf_adapter *adapter)
{ struct iavf_arq_event_info event; int err;
/** * iavf_send_vf_config_msg * @adapter: adapter structure * * Send VF configuration request admin queue message to the PF. The reply * is not checked in this function. Returns 0 if the message was * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
**/ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
{
u32 caps;
/** * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities * @adapter: private adapter structure * * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP * capabilities available to this device. This includes the following * potential access: * * * READ_PHC - access to read the PTP hardware clock time * * RX_TSTAMP - access to request Rx timestamps on all received packets * * The PF will reply with the same opcode a filled out copy of the * virtchnl_ptp_caps structure which defines the specifics of which features * are accessible to this device. * * Return: 0 if success, error code otherwise.
*/ int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
{ struct virtchnl_ptp_caps hw_caps = {
.caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
};
/**
 * iavf_validate_num_queues
 * @adapter: adapter structure
 *
 * Validate that the number of queues the PF has sent in
 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. If it
 * is, clamp both the top-level count and every VSI's count to
 * IAVF_MAX_REQ_QUEUES.
 **/
static void iavf_validate_num_queues(struct iavf_adapter *adapter)
{
	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
		struct virtchnl_vsi_resource *vsi_res;
		int i;

		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
			 adapter->vf_res->num_queue_pairs,
			 IAVF_MAX_REQ_QUEUES);
		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
			 IAVF_MAX_REQ_QUEUES);
		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
		/* keep per-VSI queue counts consistent with the clamped total */
		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
			vsi_res = &adapter->vf_res->vsi_res[i];
			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
		}
	}
}
/** * iavf_get_vf_config * @adapter: private adapter structure * * Get VF configuration from PF and populate hw structure. Must be called after * admin queue is initialized. Busy waits until response is received from PF, * with maximum timeout. Response from PF is returned in the buffer for further * processing by the caller.
**/ int iavf_get_vf_config(struct iavf_adapter *adapter)
{ struct iavf_hw *hw = &adapter->hw; struct iavf_arq_event_info event;
u16 len; int err;
len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
event.buf_len = len;
event.msg_buf = kzalloc(len, GFP_KERNEL); if (!event.msg_buf) return -ENOMEM;
/* some PFs send more queues than we should have so validate that * we aren't getting too many queues
*/ if (!err)
iavf_validate_num_queues(adapter);
iavf_vf_parse_hw_config(hw, adapter->vf_res);
kfree(event.msg_buf);
return err;
}
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{ struct iavf_arq_event_info event; int err;
u16 len;
len = sizeof(struct virtchnl_vlan_caps);
event.buf_len = len;
event.msg_buf = kzalloc(len, GFP_KERNEL); if (!event.msg_buf) return -ENOMEM;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
adapter->current_op); return;
}
adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
len = virtchnl_struct_size(vqci, qpair, pairs);
vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return;
if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair; /* Size check is not needed here - HW max is 16 queue pairs, and we * can fit info for 31 of them into the AQ buffer before it overflows.
*/ for (i = 0; i < pairs; i++) {
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i].count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; if (IAVF_RXDID_ALLOWED(adapter))
vqpi->rxq.rxdid = adapter->rxdid; if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
vqpi->rxq.flags = rx_flags;
vqpi++;
}
/**
 * iavf_enable_queues
 * @adapter: adapter structure
 *
 * Ask the PF to enable every queue pair owned by this VF. Silently queues
 * nothing if another virtchnl command is still outstanding.
 **/
void iavf_enable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;

	/* select all active Tx/Rx queues via a contiguous bitmask */
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;

	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}
/**
 * iavf_disable_queues
 * @adapter: adapter structure
 *
 * Ask the PF to disable every queue pair owned by this VF. Silently queues
 * nothing if another virtchnl command is still outstanding.
 **/
void iavf_disable_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;

	/* select all active Tx/Rx queues via a contiguous bitmask */
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;

	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			 (u8 *)&vqs, sizeof(vqs));
}
/** * iavf_map_queues * @adapter: adapter structure * * Request that the PF map queues to interrupt vectors. Misc causes, including * admin queue, are always mapped to vector 0.
**/ void iavf_map_queues(struct iavf_adapter *adapter)
{ struct virtchnl_irq_map_info *vimi; struct virtchnl_vector_map *vecmap; struct iavf_q_vector *q_vector; int v_idx, q_vectors;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
adapter->current_op); return;
}
adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
/** * iavf_set_mac_addr_type - Set the correct request type from the filter type * @virtchnl_ether_addr: pointer to requested list element * @filter: pointer to requested filter
**/ staticvoid
iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr, conststruct iavf_mac_filter *filter)
{
virtchnl_ether_addr->type = filter->is_primary ?
VIRTCHNL_ETHER_ADDR_PRIMARY :
VIRTCHNL_ETHER_ADDR_EXTRA;
}
/** * iavf_add_ether_addrs * @adapter: adapter structure * * Request that the PF add one or more addresses to our filters.
**/ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{ struct virtchnl_ether_addr_list *veal; struct iavf_mac_filter *f; int i = 0, count = 0; bool more = false;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
adapter->current_op); return;
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add)
count++;
} if (!count) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
len = virtchnl_struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(veal, list, --count);
more = true;
}
veal = kzalloc(len, GFP_ATOMIC); if (!veal) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
iavf_set_mac_addr_type(&veal->list[i], f);
i++;
f->add = false; if (i == count) break;
}
} if (!more)
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
/** * iavf_del_ether_addrs * @adapter: adapter structure * * Request that the PF remove one or more addresses from our filters.
**/ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{ struct virtchnl_ether_addr_list *veal; struct iavf_mac_filter *f, *ftmp; int i = 0, count = 0; bool more = false;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
adapter->current_op); return;
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->remove)
count++;
} if (!count) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
len = virtchnl_struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(veal, list, --count);
more = true;
}
veal = kzalloc(len, GFP_ATOMIC); if (!veal) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { if (f->remove) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
iavf_set_mac_addr_type(&veal->list[i], f);
i++;
list_del(&f->list);
kfree(f); if (i == count) break;
}
} if (!more)
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
/** * iavf_add_vlans * @adapter: adapter structure * * Request that the PF add one or more VLAN filters to our VSI.
**/ void iavf_add_vlans(struct iavf_adapter *adapter)
{ int len, i = 0, count = 0; struct iavf_vlan_filter *f; bool more = false;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
adapter->current_op); return;
}
if (VLAN_ALLOWED(adapter)) { struct virtchnl_vlan_filter_list *vvfl;
adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC); if (!vvfl) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_ADD) {
vvfl->vlan_id[i] = f->vlan.vid;
i++;
f->state = IAVF_VLAN_IS_NEW; if (i == count) break;
}
} if (!more)
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
if ((count + current_vlans) > max_vlans &&
current_vlans < max_vlans) {
count = max_vlans - iavf_get_num_vlans_added(adapter);
more = true;
}
len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
more = true;
}
vvfl_v2 = kzalloc(len, GFP_ATOMIC); if (!vvfl_v2) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
/* give priority over outer if it's enabled */ if (filtering_support->outer)
vlan = &vvfl_v2->filters[i].outer; else
vlan = &vvfl_v2->filters[i].inner;
/** * iavf_del_vlans * @adapter: adapter structure * * Request that the PF remove one or more VLAN filters from our VSI.
**/ void iavf_del_vlans(struct iavf_adapter *adapter)
{ struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
adapter->current_op); return;
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { /* since VLAN capabilities are not allowed, we dont want to send * a VLAN delete request because it will most likely fail and * create unnecessary errors/noise, so just free the VLAN * filters marked for removal to enable bailing out before * sending a virtchnl message
*/ if (f->state == IAVF_VLAN_REMOVE &&
!VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
} elseif (f->state == IAVF_VLAN_DISABLE &&
!VLAN_FILTERING_ALLOWED(adapter)) {
f->state = IAVF_VLAN_INACTIVE;
} elseif (f->state == IAVF_VLAN_REMOVE ||
f->state == IAVF_VLAN_DISABLE) {
count++;
}
} if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
if (VLAN_ALLOWED(adapter)) { struct virtchnl_vlan_filter_list *vvfl;
adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC); if (!vvfl) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { if (f->state == IAVF_VLAN_DISABLE) {
vvfl->vlan_id[i] = f->vlan.vid;
f->state = IAVF_VLAN_INACTIVE;
i++; if (i == count) break;
} elseif (f->state == IAVF_VLAN_REMOVE) {
vvfl->vlan_id[i] = f->vlan.vid;
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
i++; if (i == count) break;
}
}
if (!more)
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
more = true;
}
vvfl_v2 = kzalloc(len, GFP_ATOMIC); if (!vvfl_v2) {
spin_unlock_bh(&adapter->mac_vlan_list_lock); return;
}
/* give priority over outer if it's enabled */ if (filtering_support->outer)
vlan = &vvfl_v2->filters[i].outer; else
vlan = &vvfl_v2->filters[i].inner;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
adapter->current_op); return;
}
/* prevent changes to promiscuous flags */
spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
/* sanity check to prevent duplicate AQ calls */ if (!iavf_promiscuous_mode_changed(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n"); /* allow changes to promiscuous flags */
spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock); return;
}
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* no error message, this isn't crucial */ return;
}
adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, sizeof(vqs))) /* if the request failed, don't lock out others */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
 * iavf_get_rss_hashcfg
 * @adapter: adapter structure
 *
 * Request RSS Hash enable bits from PF. Bails out (with an error log) if a
 * different virtchnl command is already pending.
 **/
void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
	/* no payload - the request itself is the whole message */
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
}
/**
 * iavf_set_rss_hashcfg
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash capabilities from the cached
 * adapter->rss_hashcfg value. Bails out if another command is pending.
 **/
void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
{
	struct virtchnl_rss_hashcfg vrh;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
			adapter->current_op);
		return;
	}

	vrh.hashcfg = adapter->rss_hashcfg;
	adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
			 sizeof(vrh));
}
/** * iavf_set_rss_key * @adapter: adapter structure * * Request the PF to set our RSS hash key
**/ void iavf_set_rss_key(struct iavf_adapter *adapter)
{ struct virtchnl_rss_key *vrk; int len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
adapter->current_op); return;
}
len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
vrk = kzalloc(len, GFP_KERNEL); if (!vrk) return;
vrk->vsi_id = adapter->vsi.id;
vrk->key_len = adapter->rss_key_size;
memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
/**
 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
 * @adapter: adapter structure
 * @tpid: VLAN TPID (ETH_P_8021Q or ETH_P_8021AD)
 * @offload_op: opcode used to determine which AQ required bit to clear
 *
 * Maps the (offload_op, tpid) pair onto the corresponding
 * IAVF_FLAG_AQ_*_VLAN_* bit and clears it. Unsupported opcodes are logged
 * but otherwise ignored.
 **/
static void
iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
				  enum virtchnl_ops offload_op)
{
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		if (tpid == ETH_P_8021Q)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
		else if (tpid == ETH_P_8021AD)
			adapter->aq_required &=
				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
			offload_op);
	}
}
/** * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl * @adapter: adapter structure * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8) * @offload_op: offload_op used to make the request over virtchnl
*/ staticvoid
iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, enum virtchnl_ops offload_op)
{ struct virtchnl_vlan_setting *msg; int len = sizeof(*msg);
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
offload_op, adapter->current_op); return;
}
adapter->current_op = offload_op;
msg = kzalloc(len, GFP_KERNEL); if (!msg) return;
msg->vport_id = adapter->vsi_res->vsi_id;
/* always clear to prevent unsupported and endless requests */
iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) /** * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command * @adapter: adapter private structure * * De-queue one PTP command request and send the command message to the PF. * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
*/ void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
{ struct iavf_ptp_aq_cmd *cmd; int err;
if (!adapter->ptp.clock) { /* This shouldn't be possible to hit, since no messages should * be queued if PTP is not initialized.
*/
pci_err(adapter->pdev, "PTP is not initialized\n");
adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD; return;
}
mutex_lock(&adapter->ptp.aq_cmd_lock);
cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds, struct iavf_ptp_aq_cmd, list); if (!cmd) { /* no further PTP messages to send */
adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD; goto out_unlock;
}
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
pci_err(adapter->pdev, "Cannot send PTP command %d, command %d pending\n",
cmd->v_opcode, adapter->current_op); goto out_unlock;
}
err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen); if (!err) { /* Command was sent without errors, so we can remove it from * the list and discard it.
*/
list_del(&cmd->list);
kfree(cmd);
} else { /* We failed to send the command, try again next cycle */
pci_err(adapter->pdev, "Failed to send PTP command %d\n",
cmd->v_opcode);
}
if (list_empty(&adapter->ptp.aq_cmds)) /* no further PTP messages to send */
adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
/** * iavf_print_link_message - print link up or down * @adapter: adapter structure * * Log a message telling the world of our wonderous link status
*/ staticvoid iavf_print_link_message(struct iavf_adapter *adapter)
{ struct net_device *netdev = adapter->netdev; int link_speed_mbps; char *speed;
if (!adapter->link_up) {
netdev_info(netdev, "NIC Link is Down\n"); return;
}
if (ADV_LINK_SUPPORT(adapter)) {
link_speed_mbps = adapter->link_speed_mbps; goto print_link_msg;
}
switch (adapter->link_speed) { case VIRTCHNL_LINK_SPEED_40GB:
link_speed_mbps = SPEED_40000; break; case VIRTCHNL_LINK_SPEED_25GB:
link_speed_mbps = SPEED_25000; break; case VIRTCHNL_LINK_SPEED_20GB:
link_speed_mbps = SPEED_20000; break; case VIRTCHNL_LINK_SPEED_10GB:
link_speed_mbps = SPEED_10000; break; case VIRTCHNL_LINK_SPEED_5GB:
link_speed_mbps = SPEED_5000; break; case VIRTCHNL_LINK_SPEED_2_5GB:
link_speed_mbps = SPEED_2500; break; case VIRTCHNL_LINK_SPEED_1GB:
link_speed_mbps = SPEED_1000; break; case VIRTCHNL_LINK_SPEED_100MB:
link_speed_mbps = SPEED_100; break; default:
link_speed_mbps = SPEED_UNKNOWN; break;
}
netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
kfree(speed);
}
/** * iavf_get_vpe_link_status * @adapter: adapter structure * @vpe: virtchnl_pf_event structure * * Helper function for determining the link status
**/ staticbool
iavf_get_vpe_link_status(struct iavf_adapter *adapter, struct virtchnl_pf_event *vpe)
{ if (ADV_LINK_SUPPORT(adapter)) return vpe->event_data.link_event_adv.link_status; else return vpe->event_data.link_event.link_status;
}
/**
 * iavf_set_adapter_link_speed_from_vpe
 * @adapter: adapter structure for which we are setting the link speed
 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
 *
 * Helper function for setting iavf_adapter link speed. Writes the Mbps field
 * from the advanced event when ADV_LINK_SUPPORT is negotiated, otherwise the
 * legacy enum-style speed field.
 **/
static void
iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
				     struct virtchnl_pf_event *vpe)
{
	if (ADV_LINK_SUPPORT(adapter))
		adapter->link_speed_mbps =
			vpe->event_data.link_event_adv.link_speed;
	else
		adapter->link_speed = vpe->event_data.link_event.link_speed;
}
/** * iavf_get_qos_caps - get qos caps support * @adapter: iavf adapter struct instance * * This function requests PF for Supported QoS Caps.
*/ void iavf_get_qos_caps(struct iavf_adapter *adapter)
{ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot get qos caps, command %d pending\n",
adapter->current_op); return;
}
/** * iavf_set_quanta_size - set quanta size of queue chunk * @adapter: iavf adapter struct instance * @quanta_size: quanta size in bytes * @queue_index: starting index of queue chunk * @num_queues: number of queues in the queue chunk * * This function requests PF to set quanta size of queue chunk * starting at queue_index.
*/ staticvoid
iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size,
u16 queue_index, u16 num_queues)
{ struct virtchnl_quanta_cfg quanta_cfg;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set queue quanta size, command %d pending\n",
adapter->current_op); return;
}
/**
 * iavf_cfg_queues_quanta_size - configure quanta size of queues
 * @adapter: adapter structure
 *
 * Request that the PF configure the default quanta size for every active
 * queue, starting at queue index 0.
 **/
void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter)
{
	int quanta_size = IAVF_DEFAULT_QUANTA_SIZE;

	/* Set Queue Quanta Size to default */
	iavf_set_quanta_size(adapter, quanta_size, 0,
			     adapter->num_active_queues);
}
/** * iavf_cfg_queues_bw - configure bandwidth of allocated queues * @adapter: iavf adapter structure instance * * This function requests PF to configure queue bandwidth of allocated queues
*/ void iavf_cfg_queues_bw(struct iavf_adapter *adapter)
{ struct virtchnl_queues_bw_cfg *qs_bw_cfg; struct net_shaper *q_shaper; int qs_to_update = 0; int i, inx = 0;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set tc queue bw, command %d pending\n",
adapter->current_op); return;
}
for (i = 0; i < adapter->num_active_queues; i++) { if (adapter->tx_rings[i].q_shaper_update)
qs_to_update++;
}
len = struct_size(qs_bw_cfg, cfg, qs_to_update);
qs_bw_cfg = kzalloc(len, GFP_KERNEL); if (!qs_bw_cfg) return;
/** * iavf_enable_channels * @adapter: adapter structure * * Request that the PF enable channels as specified by * the user via tc tool.
**/ void iavf_enable_channels(struct iavf_adapter *adapter)
{ struct virtchnl_tc_info *vti = NULL;
size_t len; int i;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
adapter->current_op); return;
}
len = virtchnl_struct_size(vti, list, adapter->num_tc);
vti = kzalloc(len, GFP_KERNEL); if (!vti) return;
vti->num_tc = adapter->num_tc; for (i = 0; i < vti->num_tc; i++) {
vti->list[i].count = adapter->ch_config.ch_info[i].count;
vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
vti->list[i].pad = 0;
vti->list[i].max_tx_rate =
adapter->ch_config.ch_info[i].max_tx_rate;
}
/** * iavf_add_cloud_filter * @adapter: adapter structure * * Request that the PF add cloud filters as specified * by the user via tc tool.
**/ void iavf_add_cloud_filter(struct iavf_adapter *adapter)
{ struct iavf_cloud_filter *cf; struct virtchnl_filter *f; int len = 0, count = 0;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
adapter->current_op); return;
}
list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->add) {
count++; break;
}
} if (!count) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER; return;
}
adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
len = sizeof(struct virtchnl_filter);
f = kzalloc(len, GFP_KERNEL); if (!f) return;
/** * iavf_del_cloud_filter * @adapter: adapter structure * * Request that the PF delete cloud filters as specified * by the user via tc tool.
**/ void iavf_del_cloud_filter(struct iavf_adapter *adapter)
{ struct iavf_cloud_filter *cf, *cftmp; struct virtchnl_filter *f; int len = 0, count = 0;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
adapter->current_op); return;
}
list_for_each_entry(cf, &adapter->cloud_filter_list, list) { if (cf->del) {
count++; break;
}
} if (!count) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER; return;
}
adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
len = sizeof(struct virtchnl_filter);
f = kzalloc(len, GFP_KERNEL); if (!f) return;
/** * iavf_add_fdir_filter * @adapter: the VF adapter structure * * Request that the PF add Flow Director filters as specified * by the user via ethtool.
**/ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
{ struct iavf_fdir_fltr *fdir; struct virtchnl_fdir_add *f; bool process_fltr = false; int len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
adapter->current_op); return;
}
len = sizeof(struct virtchnl_fdir_add);
f = kzalloc(len, GFP_KERNEL); if (!f) return;
if (!process_fltr) { /* prevent iavf_add_fdir_filter() from being called when there * are no filters to add
*/
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
kfree(f); return;
}
adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
kfree(f);
}
/** * iavf_del_fdir_filter * @adapter: the VF adapter structure * * Request that the PF delete Flow Director filters as specified * by the user via ethtool.
**/ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{ struct virtchnl_fdir_del f = {}; struct iavf_fdir_fltr *fdir; bool process_fltr = false; int len;
/* Only one virtchnl command may be outstanding at a time; bail if busy. */
if (adapter->current_op != VIRCHNL_OP_UNKNOWN_PLACEHOLDER) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
adapter->current_op); return;
}
/* NOTE(review): this chunk appears truncated here - the scan of
 * adapter->fdir_list_head that fills "f"/"fdir", sets process_fltr, and
 * the send of VIRTCHNL_OP_DEL_FDIR_FILTER plus the closing brace are not
 * visible in this view. Confirm against the full source before editing.
 */
/** * iavf_add_adv_rss_cfg * @adapter: the VF adapter structure * * Request that the PF add RSS configuration as specified * by the user via ethtool.
**/ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
{ struct virtchnl_rss_cfg *rss_cfg; struct iavf_adv_rss *rss; bool process_rss = false; int len;
/* Only one virtchnl command may be outstanding at a time; bail if busy. */
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
adapter->current_op); return;
}
len = sizeof(struct virtchnl_rss_cfg);
/* Scratch buffer for the virtchnl RSS-config message sent to the PF. */
rss_cfg = kzalloc(len, GFP_KERNEL); if (!rss_cfg) return;
/* NOTE(review): this chunk appears truncated here - the loop that picks a
 * pending "rss" entry, sets process_rss, sends the message, and frees
 * rss_cfg (plus the closing brace) is not visible in this view. Confirm
 * against the full source before editing.
 */
/** * iavf_del_adv_rss_cfg * @adapter: the VF adapter structure * * Request that the PF delete RSS configuration as specified * by the user via ethtool.
**/ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
{ struct virtchnl_rss_cfg *rss_cfg; struct iavf_adv_rss *rss; bool process_rss = false; int len;
/* Only one virtchnl command may be outstanding at a time; bail if busy. */
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
adapter->current_op); return;
}
len = sizeof(struct virtchnl_rss_cfg);
/* Scratch buffer for the virtchnl RSS-config message sent to the PF. */
rss_cfg = kzalloc(len, GFP_KERNEL); if (!rss_cfg) return;
/* NOTE(review): this chunk appears truncated here - the loop that picks a
 * "rss" entry pending deletion, sets process_rss, sends the message, and
 * frees rss_cfg (plus the closing brace) is not visible in this view.
 * Confirm against the full source before editing.
 */
/**
 * iavf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
int iavf_request_reset(struct iavf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	int status = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	return status;
}
/** * iavf_netdev_features_vlan_strip_set - update vlan strip status * @netdev: ptr to netdev being adjusted * @enable: enable or disable vlan strip * * Helper function to change vlan strip status in netdev->features.
*/ staticvoid iavf_netdev_features_vlan_strip_set(struct net_device *netdev, constbool enable)
{ if (enable)
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; else
netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
/**
 * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
 * @adapter: private adapter structure
 *
 * Called after a reset to re-add all FDIR filters and delete some of them
 * if they were pending to be deleted.
 */
static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *f, *ftmp;
	bool add_filters = false;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
		if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
		    f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
		    f->state == IAVF_FDIR_FLTR_ACTIVE) {
			/* All filters and requests have been removed in PF,
			 * restore them
			 */
			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
			add_filters = true;
		} else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
			   f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
			/* Link down state, leave filters as inactive */
			f->state = IAVF_FDIR_FLTR_INACTIVE;
		} else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
			   f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
			/* Delete filters that were pending to be deleted, the
			 * list on PF is already cleared after a reset
			 */
			list_del(&f->list);
			iavf_dec_fdir_active_fltr(adapter, f);
			kfree(f);
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (add_filters)
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
}
/** * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME * @adapter: private adapter structure * @data: the message from the PF * @len: length of the message from the PF * * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message * is sent by the PF in response to the same op as a request from the VF. * Extract the 64bit nanoseconds time from the message and store it in * cached_phc_time. Then, notify any thread that is waiting for the update via * the wait queue.
*/ staticvoid iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter, void *data, u16 len)
/* NOTE(review): "staticvoid" above is a mangled "static void" - the file
 * as shown will not compile; fix when restoring the original formatting.
 */
{ struct virtchnl_phc_time *msg = data;
/* Reject messages whose payload size does not match virtchnl_phc_time. */
if (len != sizeof(*msg)) {
dev_err_once(&adapter->pdev->dev, "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
len, sizeof(*msg)); return;
}
/* NOTE(review): this chunk appears truncated here - the store into
 * cached_phc_time and the wait-queue notification described in the header
 * comment, plus the closing brace, are not visible in this view. Confirm
 * against the full source before editing.
 */
/** * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF * @v_retval: retval sent by PF * @msg: message sent by PF * @msglen: message length * * Asynchronous completion function for admin queue messages. Rather than busy * wait, we fire off our requests and assume that no errors will be returned. * This function handles the reply messages.
**/ void iavf_virtchnl_completion(struct iavf_adapter *adapter, enum virtchnl_ops v_opcode, enum iavf_status v_retval, u8 *msg, u16 msglen)
{
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.24 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.