#include"iavf.h" #include"iavf_ptp.h" #include"iavf_prototype.h" /* All iavf tracepoints are defined by the include below, which must * be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined
*/ #define CREATE_TRACE_POINTS #include"iavf_trace.h"
int iavf_status_to_errno(enum iavf_status status)
{ switch (status) { case IAVF_SUCCESS: return 0; case IAVF_ERR_PARAM: case IAVF_ERR_MAC_TYPE: case IAVF_ERR_INVALID_MAC_ADDR: case IAVF_ERR_INVALID_LINK_SETTINGS: case IAVF_ERR_INVALID_PD_ID: case IAVF_ERR_INVALID_QP_ID: case IAVF_ERR_INVALID_CQ_ID: case IAVF_ERR_INVALID_CEQ_ID: case IAVF_ERR_INVALID_AEQ_ID: case IAVF_ERR_INVALID_SIZE: case IAVF_ERR_INVALID_ARP_INDEX: case IAVF_ERR_INVALID_FPM_FUNC_ID: case IAVF_ERR_QP_INVALID_MSG_SIZE: case IAVF_ERR_INVALID_FRAG_COUNT: case IAVF_ERR_INVALID_ALIGNMENT: case IAVF_ERR_INVALID_PUSH_PAGE_INDEX: case IAVF_ERR_INVALID_IMM_DATA_SIZE: case IAVF_ERR_INVALID_VF_ID: case IAVF_ERR_INVALID_HMCFN_ID: case IAVF_ERR_INVALID_PBLE_INDEX: case IAVF_ERR_INVALID_SD_INDEX: case IAVF_ERR_INVALID_PAGE_DESC_INDEX: case IAVF_ERR_INVALID_SD_TYPE: case IAVF_ERR_INVALID_HMC_OBJ_INDEX: case IAVF_ERR_INVALID_HMC_OBJ_COUNT: case IAVF_ERR_INVALID_SRQ_ARM_LIMIT: return -EINVAL; case IAVF_ERR_NVM: case IAVF_ERR_NVM_CHECKSUM: case IAVF_ERR_PHY: case IAVF_ERR_CONFIG: case IAVF_ERR_UNKNOWN_PHY: case IAVF_ERR_LINK_SETUP: case IAVF_ERR_ADAPTER_STOPPED: case IAVF_ERR_PRIMARY_REQUESTS_PENDING: case IAVF_ERR_AUTONEG_NOT_COMPLETE: case IAVF_ERR_RESET_FAILED: case IAVF_ERR_BAD_PTR: case IAVF_ERR_SWFW_SYNC: case IAVF_ERR_QP_TOOMANY_WRS_POSTED: case IAVF_ERR_QUEUE_EMPTY: case IAVF_ERR_FLUSHED_QUEUE: case IAVF_ERR_OPCODE_MISMATCH: case IAVF_ERR_CQP_COMPL_ERROR: case IAVF_ERR_BACKING_PAGE_ERROR: case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE: case IAVF_ERR_MEMCPY_FAILED: case IAVF_ERR_SRQ_ENABLED: case IAVF_ERR_ADMIN_QUEUE_ERROR: case IAVF_ERR_ADMIN_QUEUE_FULL: case IAVF_ERR_BAD_RDMA_CQE: case IAVF_ERR_NVM_BLANK_MODE: case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: case IAVF_ERR_DIAG_TEST_FAILED: case IAVF_ERR_FIRMWARE_API_VERSION: case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR: return -EIO; case IAVF_ERR_DEVICE_NOT_SUPPORTED: return -ENODEV; case IAVF_ERR_NO_AVAILABLE_VSI: case IAVF_ERR_RING_FULL: return -ENOSPC; case IAVF_ERR_NO_MEMORY: return -ENOMEM; case IAVF_ERR_TIMEOUT: case 
IAVF_ERR_ADMIN_QUEUE_TIMEOUT: return -ETIMEDOUT; case IAVF_ERR_NOT_IMPLEMENTED: case IAVF_NOT_SUPPORTED: return -EOPNOTSUPP; case IAVF_ERR_ADMIN_QUEUE_NO_WORK: return -EALREADY; case IAVF_ERR_NOT_READY: return -EBUSY; case IAVF_ERR_BUF_TOO_SHORT: return -EMSGSIZE;
}
return -EIO;
}
int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{ switch (v_status) { case VIRTCHNL_STATUS_SUCCESS: return 0; case VIRTCHNL_STATUS_ERR_PARAM: case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: return -EINVAL; case VIRTCHNL_STATUS_ERR_NO_MEMORY: return -ENOMEM; case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: return -EIO; case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: return -EOPNOTSUPP;
}
return -EIO;
}
/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 *
 * Return: the iavf adapter private data attached to @pdev's netdev.
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	return netdev_priv(netdev);
}
/** * iavf_is_reset_in_progress - Check if a reset is in progress * @adapter: board private structure
*/ staticbool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
{ if (adapter->state == __IAVF_RESETTING ||
adapter->flags & (IAVF_FLAG_RESET_PENDING |
IAVF_FLAG_RESET_NEEDED)) returntrue;
returnfalse;
}
/** * iavf_wait_for_reset - Wait for reset to finish. * @adapter: board private structure * * Returns 0 if reset finished successfully, negative on timeout or interrupt.
*/ int iavf_wait_for_reset(struct iavf_adapter *adapter)
{ int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
!iavf_is_reset_in_progress(adapter),
msecs_to_jiffies(5000));
/* If ret < 0 then it means wait was interrupted. * If ret == 0 then it means we got a timeout while waiting * for reset to finish. * If ret > 0 it means reset has finished.
*/ if (ret > 0) return 0; elseif (ret < 0) return -EINTR; else return -EBUSY;
}
/** * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to
**/ enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem,
u64 size, u32 alignment)
{ struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
/** * iavf_map_rings_to_vectors - Maps descriptor rings to vectors * @adapter: board private structure to initialize * * This function maps descriptor rings to the queue-specific vectors * we were allotted through the MSI-X enabling code. Ideally, we'd have * one vector per ring/queue, but on a constrained vector budget, we * group the rings as "efficiently" as possible. You would add new * mapping configurations in here.
**/ staticvoid iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{ int rings_remaining = adapter->num_active_queues; int ridx = 0, vidx = 0; int q_vectors;
/* In the case where we have more queues than vectors, continue * round-robin on vectors until all queues are mapped.
*/ if (++vidx >= q_vectors)
vidx = 0;
}
/** * iavf_request_misc_irq - Initialize MSI-X interrupts * @adapter: board private structure * * Allocates MSI-X vector 0 and requests interrupts from the kernel. This * vector is only for the admin queue, and stays active even when the netdev * is closed.
**/ staticint iavf_request_misc_irq(struct iavf_adapter *adapter)
{ struct net_device *netdev = adapter->netdev; int err;
/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Re-derive each Tx ring's tail doorbell address from the mapped register
 * space after a reset.
 */
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int q;

	for (q = 0; q < adapter->num_active_queues; q++)
		adapter->tx_rings[q].tail = hw->hw_addr + IAVF_QTX_TAIL1(q);
}
/** * iavf_select_rx_desc_format - Select Rx descriptor format * @adapter: adapter private structure * * Select what Rx descriptor format based on availability and enabled * features. * * Return: the desired RXDID to select for a given Rx queue, as defined by * enum virtchnl_rxdid_format.
*/ static u8 iavf_select_rx_desc_format(conststruct iavf_adapter *adapter)
{
u64 rxdids = adapter->supp_rxdids;
/* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must * stick with the default value of the legacy 32 byte format.
*/ if (!IAVF_RXDID_ALLOWED(adapter)) return VIRTCHNL_RXDID_1_32B_BASE;
/* Rx timestamping requires the use of flexible NIC descriptors */ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) { if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC)) return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
pci_warn(adapter->pdev, "Unable to negotiate flexible descriptor format\n");
}
/* Warn if the PF does not list support for the default legacy * descriptor format. This shouldn't happen, as this is the format * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is * likely caused by a bug in the PF implementation failing to indicate * support for the format.
*/ if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
return VIRTCHNL_RXDID_1_32B_BASE;
}
/** * iavf_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset.
**/ staticvoid iavf_configure_rx(struct iavf_adapter *adapter)
{ struct iavf_hw *hw = &adapter->hw;
for (u32 i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
adapter->rx_rings[i].rxdid = adapter->rxdid;
}
}
/** * iavf_find_vlan - Search filter list for specific vlan filter * @adapter: board private structure * @vlan: vlan tag * * Returns ptr to the filter object or NULL. Must be called while holding the * mac_vlan_list_lock.
**/ staticstruct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{ struct iavf_vlan_filter *f;
/** * iavf_add_vlan - Add a vlan filter to the list * @adapter: board private structure * @vlan: VLAN tag * * Returns ptr to the filter object or NULL when no memory available.
**/ staticstruct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{ struct iavf_vlan_filter *f = NULL;
spin_lock_bh(&adapter->mac_vlan_list_lock);
f = iavf_find_vlan(adapter, vlan); if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) goto clearout;
f->vlan = vlan;
list_add_tail(&f->list, &adapter->vlan_filter_list);
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
} elseif (f->state == IAVF_VLAN_REMOVE) { /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed. * We can safely only change the state here.
*/
f->state = IAVF_VLAN_ACTIVE;
}
/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * If the filter was never sent to the PF it is simply dropped; otherwise it
 * is marked for removal and the admin queue request is scheduled.
 */
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f)
		goto out_unlock;

	if (f->state == IAVF_VLAN_ADD) {
		/* IAVF_VLAN_ADD means the VLAN was never sent to the PF;
		 * just drop it from the local list.
		 */
		list_del(&f->list);
		kfree(f);
		adapter->num_vlan_filters--;
	} else {
		/* ask the PF to remove the filter */
		f->state = IAVF_VLAN_REMOVE;
		iavf_schedule_aq_request(adapter,
					 IAVF_FLAG_AQ_DEL_VLAN_FILTER);
	}

out_unlock:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/** * iavf_restore_filters * @adapter: board private structure * * Restore existing non MAC filters when VF netdev comes back up
**/ staticvoid iavf_restore_filters(struct iavf_adapter *adapter)
{ struct iavf_vlan_filter *f;
/* re-add all VLAN filters */
spin_lock_bh(&adapter->mac_vlan_list_lock);
/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 *
 * Return: how many VLAN filters are currently tracked for this VF.
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return adapter->num_vlan_filters;
}
/** * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF * @adapter: board private structure * * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN, * do not impose a limit as that maintains current behavior and for * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
**/ static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{ /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has * never been a limit on the VF driver side
*/ if (VLAN_ALLOWED(adapter)) return VLAN_N_VID; elseif (VLAN_V2_ALLOWED(adapter)) return adapter->vlan_v2_caps.filtering.max_filters;
return 0;
}
/** * iavf_max_vlans_added - check if maximum VLANs allowed already exist * @adapter: board private structure
**/ staticbool iavf_max_vlans_added(struct iavf_adapter *adapter)
{ if (iavf_get_num_vlans_added(adapter) <
iavf_get_max_vlans_allowed(adapter)) returnfalse;
returntrue;
}
/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 *
 * Return: 0 on success, -EIO when filtering is not permitted or the filter
 * limit is reached, -ENOMEM on allocation failure.
 */
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* VLAN 0 is always added by the PF on VF init; do not track it */
	if (vid == 0)
		return 0;

	if (!VLAN_FILTERING_ALLOWED(adapter))
		return -EIO;

	if (iavf_max_vlans_added(adapter)) {
		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
			   iavf_get_max_vlans_allowed(adapter));
		return -EIO;
	}

	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
		return -ENOMEM;

	return 0;
}
/** * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag
**/ staticint iavf_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
{ struct iavf_adapter *adapter = netdev_priv(netdev);
/* We do not track VLAN 0 filter */ if (!vid) return 0;
/** * iavf_find_filter - Search filter list for specific mac filter * @adapter: board private structure * @macaddr: the MAC address * * Returns ptr to the filter object or NULL. Must be called while holding the * mac_vlan_list_lock.
**/ staticstruct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, const u8 *macaddr)
{ struct iavf_mac_filter *f;
/** * iavf_add_filter - Add a mac filter to the filter list * @adapter: board private structure * @macaddr: the MAC address * * Returns ptr to the filter object or NULL when no memory available.
**/ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr)
{ struct iavf_mac_filter *f;
if (!macaddr) return NULL;
f = iavf_find_filter(adapter, macaddr); if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return f;
/** * iavf_replace_primary_mac - Replace current primary address * @adapter: board private structure * @new_mac: new MAC address to be applied * * Replace current dev_addr and send request to PF for removal of previous * primary MAC address filter and addition of new primary MAC filter. * Return 0 for success, -ENOMEM for failure. * * Do not call this with mac_vlan_list_lock!
**/ staticint iavf_replace_primary_mac(struct iavf_adapter *adapter, const u8 *new_mac)
{ struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *new_f; struct iavf_mac_filter *old_f;
old_f = iavf_find_filter(adapter, hw->mac.addr); if (old_f) {
old_f->is_primary = false;
old_f->remove = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
} /* Always send the request to add if changing primary MAC, * even if filter is already present on the list
*/
new_f->is_primary = true;
new_f->add = true;
ether_addr_copy(hw->mac.addr, new_mac);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* schedule the watchdog task to immediately process the request */
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER); return 0;
}
/**
 * iavf_is_mac_set_handled - wait for a response to set MAC from PF
 * @netdev: network interface device structure
 * @macaddr: MAC address to set
 *
 * Return: true when the PF has handled the set-MAC request for @macaddr
 * (filter gone, or add completed), false otherwise.
 */
static bool iavf_is_mac_set_handled(struct net_device *netdev,
				    const u8 *macaddr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;
	bool handled;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	f = iavf_find_filter(adapter, macaddr);
	handled = !f || (!f->add && f->add_handled);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	return handled;
}
/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Requests the MAC change from the PF and waits (up to 2.5s) for the PF to
 * handle it.
 *
 * Return: 0 on success; -EADDRNOTAVAIL for an invalid address; -EAGAIN on
 * timeout; -EACCES if the PF rejected the new address; other negative
 * errno on failure or interruption.
 */
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = iavf_replace_primary_mac(adapter, addr->sa_data);
	if (ret)
		return ret;

	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
					       iavf_is_mac_set_handled(netdev, addr->sa_data),
					       msecs_to_jiffies(2500));
	if (ret < 0)
		return ret;	/* wait was interrupted */
	if (!ret)
		return -EAGAIN;	/* timed out waiting for the PF */

	/* PF responded: the request succeeded only if the netdev address
	 * was actually updated to the requested MAC.
	 */
	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return -EACCES;

	return 0;
}
/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 *
 * Return: 0 on success, -ENOMEM if the filter could not be allocated.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return iavf_add_filter(adapter, addr) ? 0 : -ENOMEM;
}
/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 *
 * Return: always 0.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* We may be asked to delete our own device address from the uc
	 * list.  That address lives in the VSI's MAC/VLAN filter list, so
	 * such requests must be ignored rather than honored.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	return 0;
}
/** * iavf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure
*/ staticvoid iavf_up_complete(struct iavf_adapter *adapter)
{
netdev_assert_locked(adapter->netdev);
/** * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF * yet and mark other to be removed. * @adapter: board private structure
**/ staticvoid iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{ struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock); /* clear the sync flag on all filters */
__dev_uc_unsync(adapter->netdev, NULL);
__dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
list) { if (f->add) {
list_del(&f->list);
kfree(f);
} else {
f->remove = true;
}
}
/** * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and * mark other to be removed. * @adapter: board private structure
**/ staticvoid iavf_clear_cloud_filters(struct iavf_adapter *adapter)
{ struct iavf_cloud_filter *cf, *cftmp;
/**
 * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 */
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
	struct iavf_fdir_fltr *fdir;

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		switch (fdir->state) {
		case IAVF_FDIR_FLTR_ADD_REQUEST:
			/* Cancel a request, keep filter as inactive */
			fdir->state = IAVF_FDIR_FLTR_INACTIVE;
			break;
		case IAVF_FDIR_FLTR_ADD_PENDING:
		case IAVF_FDIR_FLTR_ACTIVE:
			/* Disable filters which are active or have a pending
			 * request to PF to be added
			 */
			fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
			break;
		default:
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);
}
/** * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark * other to be removed. * @adapter: board private structure
**/ staticvoid iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
{ struct iavf_adv_rss *rss, *rsstmp;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) return;
if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { /* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. Don't wait * here for this to complete. The watchdog is still running * and it will take care of this.
*/ if (!list_empty(&adapter->mac_filter_list))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; if (!list_empty(&adapter->vlan_filter_list))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; if (!list_empty(&adapter->cloud_filter_list))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; if (!list_empty(&adapter->fdir_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; if (!list_empty(&adapter->adv_rss_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
}
/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Return: 0 on success, negative on failure.
 */
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int v_actual;

	/* We'll want at least 3 (MIN_MSIX_COUNT):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 *
	 * The more we get, the more we will assign to Tx/Rx Cleanup for the
	 * separate queues...where Rx Cleanup >= Tx Cleanup.  Right now we
	 * only care about how many we get; they are set up later while
	 * requesting irq's.
	 */
	v_actual = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					 MIN_MSIX_COUNT, vectors);
	if (v_actual < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return v_actual;
	}

	/* Record the number of vectors we actually got, which is the
	 * minimum of max_msix_q_vectors + NONQ_VECS and what was granted.
	 */
	adapter->num_msix_vectors = v_actual;
	return 0;
}
/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.  A no-op when no VSI
 * resources were ever obtained.
 */
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;

	adapter->num_active_queues = 0;

	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}
/** * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload * @adapter: board private structure * * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or * stripped in certain descriptor fields. Instead of checking the offload * capability bits in the hot path, cache the location the ring specific * flags.
*/ void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{ int i;
for (i = 0; i < adapter->num_active_queues; i++) { struct iavf_ring *tx_ring = &adapter->tx_rings[i]; struct iavf_ring *rx_ring = &adapter->rx_rings[i];
/* prevent multiple L2TAG bits being set after VFR */
tx_ring->flags &=
~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
rx_ring->flags &=
~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
/** * iavf_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * * We allocate one ring per queue at run-time since we don't know the * number of queues at compile-time. The polling_netdev array is * intended for Multiqueue, but should work fine with a single queue.
**/ staticint iavf_alloc_queues(struct iavf_adapter *adapter)
{ int i, num_active_queues;
/* If we're in reset reallocating queues we don't actually know yet for * certain the PF gave us the number of queues we asked for but we'll * assume it did. Once basic reset is finished we'll confirm once we * start negotiating config with PF.
*/ if (adapter->num_req_queues)
num_active_queues = adapter->num_req_queues; elseif ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc)
num_active_queues = adapter->ch_config.total_qps; else
num_active_queues = min_t(int,
adapter->vsi_res->num_queue_pairs,
(int)(num_online_cpus()));
adapter->tx_rings = kcalloc(num_active_queues, sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->tx_rings) goto err_out;
adapter->rx_rings = kcalloc(num_active_queues, sizeof(struct iavf_ring), GFP_KERNEL); if (!adapter->rx_rings) goto err_out;
for (i = 0; i < num_active_queues; i++) { struct iavf_ring *tx_ring; struct iavf_ring *rx_ring;
/** * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel.
**/ staticint iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{ int vector, v_budget; int pairs = 0; int err = 0;
/* It's easy to be greedy for MSI-X vectors, but it really doesn't do * us much good if we have more vectors than CPUs. However, we already * limit the total number of queues by the number of CPUs so we do not * need any further limiting here.
*/
v_budget = min_t(int, pairs + NONQ_VECS,
(int)adapter->vf_res->max_vectors);
/** * iavf_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector.
**/ staticvoid iavf_free_q_vectors(struct iavf_adapter *adapter)
{ int q_idx, num_q_vectors;
/** * iavf_init_interrupt_scheme - Determine if MSIX is supported and init * @adapter: board private structure to initialize *
**/ staticint iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{ int err;
err = iavf_alloc_queues(adapter); if (err) {
dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); goto err_alloc_queues;
}
err = iavf_set_interrupt_capability(adapter); if (err) {
dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); goto err_set_interrupt;
}
err = iavf_alloc_q_vectors(adapter); if (err) {
dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors;
}
/* If we've made it so far while ADq flag being ON, then we haven't * bailed out anywhere in middle. And ADq isn't just enabled but actual * resources have been allocated in the reset path. * Now we can truly claim that ADq is enabled.
*/ if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc)
dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
adapter->num_tc);
/** * iavf_finish_config - do all netdev work that needs RTNL * @work: our work_struct * * Do work that needs RTNL.
*/ staticvoid iavf_finish_config(struct work_struct *work)
{ struct iavf_adapter *adapter; bool netdev_released = false; int pairs, err;
/* Always take RTNL first to prevent circular lock dependency; * the dev->lock (== netdev lock) is needed to update the queue number.
*/
rtnl_lock();
netdev_lock(adapter->netdev);
switch (adapter->state) { case __IAVF_DOWN: /* Set the real number of queues when reset occurs while * state == __IAVF_DOWN
*/
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
netif_set_real_num_tx_queues(adapter->netdev, pairs);
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
netdev_unlock(adapter->netdev);
netdev_released = true;
err = register_netdevice(adapter->netdev); if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
netdev_lock(adapter->netdev);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
netdev_unlock(adapter->netdev); goto out;
}
} break; case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
netif_set_real_num_tx_queues(adapter->netdev, pairs); break;
default: break;
}
out: if (!netdev_released)
netdev_unlock(adapter->netdev);
rtnl_unlock();
}
/**
 * iavf_schedule_finish_config - queue the finish_config work item
 * @adapter: board private structure
 *
 * Does nothing when the driver is being removed.
 */
void iavf_schedule_finish_config(struct iavf_adapter *adapter)
{
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	queue_work(adapter->wq, &adapter->finish_config);
}
/** * iavf_process_aq_command - process aq_required flags * and sends aq command * @adapter: pointer to iavf adapter structure * * Returns 0 on success * Returns error code if no command was sent * or error code if the command failed.
**/ staticint iavf_process_aq_command(struct iavf_adapter *adapter)
{ if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) return iavf_send_vf_config_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS) return iavf_send_vf_offload_vlan_v2_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS) return iavf_send_vf_supported_rxdids_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS) return iavf_send_vf_ptp_caps_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
iavf_map_queues(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
iavf_add_ether_addrs(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
iavf_add_vlans(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
iavf_del_ether_addrs(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
iavf_del_vlans(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
iavf_enable_vlan_stripping(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
iavf_disable_vlan_stripping(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
iavf_cfg_queues_bw(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) {
iavf_get_qos_caps(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) {
iavf_cfg_queues_quanta_size(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
iavf_configure_queues(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
iavf_enable_queues(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) { /* This message goes straight to the firmware, not the * PF, so we don't have to set current_op as we will * not get a response through the ARQ.
*/
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS; return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
iavf_get_rss_hashcfg(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
iavf_set_rss_hashcfg(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
iavf_set_rss_key(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
iavf_set_rss_lut(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
iavf_set_rss_hfunc(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
iavf_set_promiscuous(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
iavf_enable_channels(adapter); return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
iavf_disable_channels(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
iavf_add_cloud_filter(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
iavf_del_cloud_filter(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
iavf_add_fdir_filter(adapter); return IAVF_SUCCESS;
} if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
iavf_del_fdir_filter(adapter); return IAVF_SUCCESS;
} if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
iavf_add_adv_rss_cfg(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
iavf_del_adv_rss_cfg(adapter); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD); return 0;
} if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
iavf_virtchnl_send_ptp_cmd(adapter); return IAVF_SUCCESS;
} if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter); return 0;
}
return -EAGAIN;
}
/**
 * iavf_set_vlan_offload_features - set VLAN offload configuration
 * @adapter: board private structure
 * @prev_features: previous features used for comparison
 * @features: updated features used for configuration
 *
 * Set the aq_required bit(s) based on the requested features passed in to
 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
 * the watchdog if any changes are requested to expedite the request via
 * virtchnl.
 **/
static void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
			       netdev_features_t prev_features,
			       netdev_features_t features)
{
	bool enable_stripping = true, enable_insertion = true;
	u16 vlan_ethertype = 0;
	u64 aq_required = 0;

	/* keep cases separate because one ethertype for offloads can be
	 * disabled at the same time as another is disabled, so check for an
	 * enabled ethertype first, then check for disabled. Default to
	 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
	 * stripping.
	 */
	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else
		vlan_ethertype = ETH_P_8021Q;

	/* RX features toggle stripping, TX features toggle insertion */
	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
		enable_stripping = false;
	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
		enable_insertion = false;

	if (VLAN_ALLOWED(adapter)) {
		/* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
		 * stripping via virtchnl. VLAN insertion can be toggled on the
		 * netdev, but it doesn't require a virtchnl message
		 */
		if (enable_stripping)
			aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;

	} else if (VLAN_V2_ALLOWED(adapter)) {
		/* V2 offloads select the flag set by the negotiated
		 * ethertype: CTAG (802.1Q) or STAG (802.1ad)
		 */
		switch (vlan_ethertype) {
		case ETH_P_8021Q:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;

			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
			break;
		case ETH_P_8021AD:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;

			/* NOTE(review): the original chunk was missing the
			 * STAG insertion handling and the closing of the
			 * switch/else-if scopes (unbalanced braces — would not
			 * compile); restored here to mirror the CTAG case.
			 */
			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
			break;
		}
	}

	/* only kick the watchdog when something actually changed */
	if (aq_required)
		iavf_schedule_aq_request(adapter, aq_required);
}
/** * iavf_startup - first step of driver startup * @adapter: board private structure * * Function process __IAVF_STARTUP driver state. * When success the state is changed to __IAVF_INIT_VERSION_CHECK * when fails the state is changed to __IAVF_INIT_FAILED
**/ staticvoid iavf_startup(struct iavf_adapter *adapter)
{ struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; enum iavf_status status; int ret;
ret = iavf_check_reset_complete(hw); if (ret) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
ret); goto err;
}
hw->aq.num_arq_entries = IAVF_AQ_LEN;
hw->aq.num_asq_entries = IAVF_AQ_LEN;
hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
status = iavf_init_adminq(hw); if (status) {
dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
status); goto err;
}
ret = iavf_send_api_ver(adapter); if (ret) {
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.23 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.