/* Local includes */ #include"i40e.h" #include"i40e_devids.h" #include"i40e_diag.h" #include"i40e_lan_hmc.h" #include"i40e_virtchnl_pf.h" #include"i40e_xsk.h"
/* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined
*/ #define CREATE_TRACE_POINTS #include"i40e_trace.h"
/** * i40e_hw_to_dev - get device pointer from the hardware structure * @hw: pointer to the device HW structure
**/ struct device *i40e_hw_to_dev(struct i40e_hw *hw)
{ struct i40e_pf *pf = i40e_hw_to_pf(hw);
return &pf->pdev->dev;
}
/** * i40e_allocate_dma_mem - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to
**/ int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
u64 size, u32 alignment)
{ struct i40e_pf *pf = i40e_hw_to_pf(hw);
/** * i40e_free_dma_mem - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free
**/ int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{ struct i40e_pf *pf = i40e_hw_to_pf(hw);
/**
 * i40e_allocate_virt_mem - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 *
 * Return: 0 on success, -ENOMEM if the allocation fails.
 **/
int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			   u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	return mem->va ? 0 : -ENOMEM;
}
/**
 * i40e_free_virt_mem - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 *
 * Return: always 0.
 **/
int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	mem->size = 0;
	kfree(mem->va);		/* kfree() tolerates a NULL pointer */
	mem->va = NULL;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * First-fit search for a contiguous run of @needed free entries, which
 * are then tagged with @id.
 *
 * Return: the base item index of the lump, or negative for error.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	u16 last = pile->num_entries - 1;
	int start = 0;

	/* The FDIR VSI always gets the last queue in the pile so that it
	 * does not fragment the contiguous qp_pile space.
	 */
	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[last] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
				last);
			return -ENOMEM;
		}
		pile->list[last] = id | I40E_PILE_VALID_BIT;
		return last;
	}

	while (start < pile->num_entries) {
		int span;

		/* step past entries that already have an owner */
		if (pile->list[start] & I40E_PILE_VALID_BIT) {
			start++;
			continue;
		}

		/* measure the free run starting at 'start' */
		for (span = 0;
		     span < needed && start + span < pile->num_entries;
		     span++) {
			if (pile->list[start + span] & I40E_PILE_VALID_BIT)
				break;
		}

		if (span == needed) {
			/* run is large enough: claim it for this owner */
			for (span = 0; span < needed; span++)
				pile->list[start + span] =
					id | I40E_PILE_VALID_BIT;
			return start;
		}

		/* run too small, resume searching past it */
		start += span;
	}

	return -ENOMEM;
}
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Clears consecutive entries starting at @index for as long as they are
 * owned by @id.
 *
 * Return: the count of items freed, or -EINVAL for a bad pile/index.
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int freed = 0;
	u16 pos;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (pos = index; pos < pile->num_entries; pos++) {
		if (pile->list[pos] != valid_id)
			break;
		pile->list[pos] = 0;
		freed++;
	}

	return freed;
}
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 *
 * Return: the matching VSI, or NULL when no VSI carries @id.
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	struct i40e_vsi *cur;
	int idx;

	i40e_pf_for_each_vsi(pf, idx, cur) {
		if (cur->id == id)
			return cur;
	}

	return NULL;
}
/** * i40e_service_event_schedule - Schedule the service task to wake up * @pf: board private structure * * If not already scheduled, this puts the task into the work queue
**/ void i40e_service_event_schedule(struct i40e_pf *pf)
{ if ((!test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
test_bit(__I40E_RECOVERY_MODE, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
/** * i40e_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number timing out * * If any port has noticed a Tx timeout, it is likely that the whole * device is munged, not just the one netdev port, so go for the full * reset.
**/ staticvoid i40e_tx_timeout(struct net_device *netdev, unsignedint txqueue)
{ struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *tx_ring = NULL; unsignedint i;
u32 head, val;
pf->tx_timeout_count++;
/* with txqueue index, find the tx_ring struct */ for (i = 0; i < vsi->num_queue_pairs; i++) { if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { if (txqueue ==
vsi->tx_rings[i]->queue_index) {
tx_ring = vsi->tx_rings[i]; break;
}
}
}
if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
pf->tx_timeout_recovery_level = 1; /* reset after some time */ elseif (time_before(jiffies,
(pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) return; /* don't do any new action before the next timeout */
/* don't kick off another recovery if one is already pending */ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) return;
if (tx_ring) {
head = i40e_get_head(tx_ring); /* Read interrupt register */ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
val = rd32(&pf->hw,
I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
tx_ring->vsi->base_vector - 1)); else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Return: the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/** * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring * @ring: Tx ring to get statistics from * @stats: statistics entry to be updated
**/ staticvoid i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring, struct rtnl_link_stats64 *stats)
{
u64 bytes, packets; unsignedint start;
do {
start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
} while (u64_stats_fetch_retry(&ring->syncp, start));
/** * i40e_vsi_reset_stats - Resets all stats of the given vsi * @vsi: the VSI to have its stats reset
**/ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{ struct rtnl_link_stats64 *ns; int i;
/** * i40e_pf_reset_stats - Reset all of the stats for the given PF * @pf: the PF to be reset
**/ void i40e_pf_reset_stats(struct i40e_pf *pf)
{ struct i40e_veb *veb; int i;
/** * i40e_compute_pci_to_hw_id - compute index form PCI function. * @vsi: ptr to the VSI to read from. * @hw: ptr to the hardware info.
**/ static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
{ int pf_count = i40e_get_pf_count(hw);
/** * i40e_stat_update64 - read and update a 64 bit stat from the chip. * @hw: ptr to the hardware info. * @hireg: the high 32 bit reg to read. * @loreg: the low 32 bit reg to read. * @offset_loaded: has the initial offset been loaded yet. * @offset: ptr to current offset value. * @stat: ptr to the stat. * * Since the device stats are not reset at PFReset, they will not * be zeroed when the driver starts. We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero.
**/ staticvoid i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat)
{
u64 new_data;
/** * i40e_stat_update48 - read and update a 48 bit stat from the chip * @hw: ptr to the hardware info * @hireg: the high 32 bit reg to read * @loreg: the low 32 bit reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. In the process, we also manage * the potential roll-over.
**/ staticvoid i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat)
{
u64 new_data;
/** * i40e_stat_update32 - read and update a 32 bit stat from the chip * @hw: ptr to the hardware info * @reg: the hw reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat
**/ staticvoid i40e_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat)
{
u32 new_data;
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 delta = rd32(hw, reg);

	/* must write a nonzero value to clear register */
	wr32(hw, reg, 1);
	*stat += delta;
}
/**
 * i40e_stats_update_rx_discards - update rx_discards.
 * @vsi: ptr to the VSI to be updated.
 * @hw: ptr to the hardware info.
 * @stat_idx: VSI's stat_counter_idx.
 * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
 * @stat_offset: ptr to stat_offset to store first read of specific register.
 * @stat: ptr to VSI's stat to be updated.
 **/
static void
i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
			      int stat_idx, bool offset_loaded,
			      struct i40e_eth_stats *stat_offset,
			      struct i40e_eth_stats *stat)
{
	/* hoist the invariant HW id instead of computing it twice for the
	 * RXERR1 high/low register pair
	 */
	u32 hw_id = i40e_compute_pci_to_hw_id(vsi, hw);

	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
			   &stat_offset->rx_discards, &stat->rx_discards);
	i40e_stat_update64(hw,
			   I40E_GL_RXERR1H(hw_id),
			   I40E_GL_RXERR1L(hw_id),
			   offset_loaded, &stat_offset->rx_discards_other,
			   &stat->rx_discards_other);
}
/** * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. * @vsi: the VSI to be updated
**/ void i40e_update_eth_stats(struct i40e_vsi *vsi)
{ int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */
es = &vsi->eth_stats;
oes = &vsi->eth_stats_offsets;
/* Gather up the stats that the hw collects */
i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_errors, &es->tx_errors);
i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_unknown_protocol, &es->rx_unknown_protocol);
/** * i40e_update_vsi_stats - Update the vsi statistics counters. * @vsi: the VSI to be updated * * There are a few instances where we store the same stat in a * couple of different structs. This is partly because we have * the netdev stats that need to be filled out, which is slightly * different from the "eth_stats" defined by the chip and used in * VF communications. We sort it out here.
**/ staticvoid i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy; struct i40e_pf *pf = vsi->back; struct rtnl_link_stats64 *ons; struct rtnl_link_stats64 *ns; /* netdev stats */ struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */
u64 tx_restart, tx_busy; struct i40e_ring *p;
u64 bytes, packets; unsignedint start;
u64 tx_linearize;
u64 tx_force_wb;
u64 tx_stopped;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
test_bit(__I40E_CONFIG_BUSY, pf->state)) return;
ns = i40e_get_vsi_stats_struct(vsi);
ons = &vsi->net_stats_offsets;
es = &vsi->eth_stats;
oes = &vsi->eth_stats_offsets;
/* Gather up the netdev and vsi stats that the driver collects * on the fly during packet processing
*/
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
tx_stopped = 0;
rx_page = 0;
rx_buf = 0;
rx_reuse = 0;
rx_alloc = 0;
rx_waive = 0;
rx_busy = 0;
rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]); if (!p) continue;
/* pull in a couple PF stats if this is the main vsi */ if (vsi->type == I40E_VSI_MAIN) {
ns->rx_crc_errors = pf->stats.crc_errors;
ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
ns->rx_length_errors = pf->stats.rx_length_errors;
}
}
/** * i40e_update_pf_stats - Update the PF statistics counters. * @pf: the PF to be updated
**/ staticvoid i40e_update_pf_stats(struct i40e_pf *pf)
{ struct i40e_hw_port_stats *osd = &pf->stats_offsets; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw *hw = &pf->hw;
u32 val; int i;
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	/* port-level stats are refreshed only via the main VSI */
	if (vsi->type == I40E_VSI_MAIN)
		i40e_update_pf_stats(vsi->back);

	i40e_update_vsi_stats(vsi);
}
/** * i40e_count_all_filters - counts VSI MAC filters * @vsi: the VSI to be searched * * Return: count of MAC filters in any state.
*/ int i40e_count_all_filters(struct i40e_vsi *vsi)
{ struct i40e_mac_filter *f; struct hlist_node *h; int bkt, cnt = 0;
/** * i40e_count_active_filters - counts VSI MAC filters * @vsi: the VSI to be searched * * Return: count of active MAC filters.
*/ int i40e_count_active_filters(struct i40e_vsi *vsi)
{ struct i40e_mac_filter *f; struct hlist_node *h; int bkt; int cnt = 0;
/** * i40e_find_filter - Search VSI filter list for specific mac/vlan filter * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * * Returns ptr to the filter object or NULL
**/ staticstruct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{ struct i40e_mac_filter *f;
u64 key;
/** * i40e_find_mac - Find a mac addr in the macvlan filters list * @vsi: the VSI to be searched * @macaddr: the MAC address we are searching for * * Returns the first filter with the provided MAC address or NULL if * MAC address was not found
**/ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{ struct i40e_mac_filter *f;
u64 key;
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * A configured PVID always implies VLAN mode.  Otherwise rely on the
 * cached has_vlan_filter flag, which is set when a VLAN filter is added
 * in i40e_add_filter and recomputed when filters are synced; this avoids
 * walking the whole filter table on every call.
 *
 * Return: true if VSI is in vlan mode or false otherwise.
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	return vsi->info.pvid || vsi->has_vlan_filter;
}
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic.
 *
 * In a similar fashion, this function also corrects filters when there is an
 * active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* A filter needs replacing when:
	 * a) a PVID is assigned and the filter is not marked VLAN=PVID,
	 * b) no PVID, active VLANs exist, and the filter is VLAN=-1
	 *    (must become VLAN=0), or
	 * c) no PVID, no active VLANs, and the filter is VLAN=0
	 *    (must become VLAN=-1).
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			/* FIX: bind the replacement filter to the temporary
			 * entry; previously 'new' was queued with f == NULL,
			 * so consumers of tmp_add_list would dereference a
			 * NULL filter pointer.
			 */
			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
/** * i40e_get_vf_new_vlan - Get new vlan id on a vf * @vsi: the vsi to configure * @new_mac: new mac filter to be added * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL * @vlan_filters: the number of active VLAN filters * @trusted: flag if the VF is trusted * * Get new VLAN id based on current VLAN filters, trust, PVID * and vf-vlan-prune-disable flag. * * Returns the value of the new vlan filter or * the old value if no new filter is needed.
*/ static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi, struct i40e_new_mac_filter *new_mac, struct i40e_mac_filter *f, int vlan_filters, bool trusted)
{
s16 pvid = le16_to_cpu(vsi->info.pvid); struct i40e_pf *pf = vsi->back; bool is_any;
/** * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary * @vsi: the vsi to configure * @tmp_add_list: list of filters ready to be added * @tmp_del_list: list of filters ready to be deleted * @vlan_filters: the number of active VLAN filters * @trusted: flag if the VF is trusted * * Correct VF VLAN filters based on current VLAN filters, trust, PVID * and vf-vlan-prune-disable flag. * * In case of memory allocation failure return -ENOMEM. Otherwise, return 0. * * This function is only expected to be called from within * i40e_sync_vsi_filters. * * NOTE: This function expects to be called while under the * mac_filter_hash_lock
*/ staticint i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi, struct hlist_head *tmp_add_list, struct hlist_head *tmp_del_list, int vlan_filters, bool trusted)
{ struct i40e_mac_filter *f, *add_head; struct i40e_new_mac_filter *new_mac; struct hlist_node *h; int bkt, new_vlan;
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	int pass;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	/* Some firmware installs the default filter as a plain perfect
	 * match, some with the ignore-VLAN flag set; try both forms and
	 * ignore error returns in either case.
	 */
	for (pass = 0; pass < 2; pass++) {
		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, macaddr);
		element.vlan_tag = 0;
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (pass)
			element.flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	}
}
/** * i40e_add_filter - Add a mac/vlan filter to the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * * Returns ptr to the filter object or NULL when no memory available. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held.
**/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{ struct i40e_mac_filter *f;
u64 key;
if (!vsi || !macaddr) return NULL;
f = i40e_find_filter(vsi, macaddr, vlan); if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return NULL;
/* Update the boolean indicating if we need to function in * VLAN mode.
*/ if (vlan >= 0)
vsi->has_vlan_filter = true;
/* If we're asked to add a filter that has been marked for removal, it * is safe to simply restore it to active state. __i40e_del_filter * will have simply deleted any filters which were previously marked * NEW or FAILED, so if it is currently marked REMOVE it must have * previously been ACTIVE. Since we haven't yet run the sync filters * task, just restore this filter to the ACTIVE state so that the * sync task leaves it in place
*/ if (f->state == I40E_FILTER_REMOVE)
f->state = I40E_FILTER_ACTIVE;
return f;
}
/** * __i40e_del_filter - Remove a specific filter from the VSI * @vsi: VSI to remove from * @f: the filter to remove from the list * * This function requires you've found * the exact filter you will remove * already, such as via i40e_find_filter or i40e_find_mac. * * NOTE: This function is expected to be called with mac_filter_hash_lock * being held. * ANOTHER NOTE: This function MUST be called from within the context of * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * instead of list_for_each_entry().
**/ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{ if (!f) return;
/* If the filter was never added to firmware then we can just delete it * directly and we don't want to set the status to remove or else an * admin queue command will unnecessarily fire.
*/ if ((f->state == I40E_FILTER_FAILED) ||
(f->state == I40E_FILTER_NEW)) {
hash_del(&f->hlist);
kfree(f);
} else {
f->state = I40E_FILTER_REMOVE;
}
/** * i40e_add_mac_filter - Add a MAC filter for all active VLANs * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered * * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise, * go through all the macvlan filters and add a macvlan filter for each * unique vlan that already exists. If a PVID has been assigned, instead only * add the macaddr to that VLAN. * * Returns last filter added on success, else NULL
**/ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{ struct i40e_mac_filter *f, *add = NULL; struct hlist_node *h; int bkt;
lockdep_assert_held(&vsi->mac_filter_hash_lock); if (vsi->info.pvid) return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
if (!i40e_is_vsi_in_vlan(vsi)) return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Return: 0 for success, or -ENOENT when no filter matched.
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (!ether_addr_equal(macaddr, f->macaddr))
			continue;
		__i40e_del_filter(vsi, f);
		found = true;
	}

	return found ? 0 : -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Return: 0 on success, negative on failure.
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct sockaddr *addr = p;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n",
			    addr->sa_data);

	/* Swap the address under the filter lock so that a concurrent
	 * .set_rx_mode() cannot observe a half-updated state:
	 * remove old filter, copy new address, add its filter.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	eth_hw_addr_set(netdev, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		int ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
				    ERR_PTR(ret),
				    libie_aq_str(hw->aq.asq_last_status));
	}

	/* let the service task worker apply the new filter changes */
	i40e_service_event_schedule(pf);

	return 0;
}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed, may be NULL to skip programming the key
 * @lut: pointer to lookup table of lut_size, may be NULL to skip the LUT
 * @lut_size: size of the lookup table
 *
 * Return: 0 on success, AQ error code otherwise.
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;

		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 libie_aq_str(hw->aq.asq_last_status));
			return ret;
		}
	}

	if (!lut)
		return 0;

	/* the main VSI programs the PF-wide LUT, others a per-VSI LUT */
	ret = i40e_aq_set_rss_lut(hw, vsi->id, vsi->type == I40E_VSI_MAIN,
				  lut, lut_size);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 libie_aq_str(hw->aq.asq_last_status));
		return ret;
	}

	return 0;
}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 *
 * Return: 0 on success (or when RSS via AQ is not supported),
 * negative error code otherwise.
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
		return 0;

	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Prefer any user-configured LUT and hash key, otherwise fall
	 * back to the driver defaults.
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
/** * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config * @vsi: the VSI being configured, * @ctxt: VSI context structure * @enabled_tc: number of traffic classes to enable * * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
**/ staticint i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt,
u8 enabled_tc)
{
u16 qcount = 0, max_qcount, qmap, sections = 0; int i, override_q, pow, num_qps, ret;
u8 netdev_tc = 0, offset = 0;
/* find the next higher power-of-2 of num queue pairs */
pow = ilog2(num_qps); if (!is_power_of_2(num_qps))
pow++;
qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
/* Setup queue offset/count for all TCs for given VSI */
max_qcount = vsi->mqprio_qopt.qopt.count[0]; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) {
offset = vsi->mqprio_qopt.qopt.offset[i];
qcount = vsi->mqprio_qopt.qopt.count[i]; if (qcount > max_qcount)
max_qcount = qcount;
vsi->tc_config.tc_info[i].qoffset = offset;
vsi->tc_config.tc_info[i].qcount = qcount;
vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
} else { /* TC is not enabled so set the offset to * default queue and allocate one queue * for the given TC.
*/
vsi->tc_config.tc_info[i].qoffset = 0;
vsi->tc_config.tc_info[i].qcount = 1;
vsi->tc_config.tc_info[i].netdev_tc = 0;
}
}
/* Set actual Tx/Rx queue pairs */
vsi->num_queue_pairs = offset + qcount;
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
ctxt->info.valid_sections |= cpu_to_le16(sections);
/* Reconfigure RSS for main VSI with max queue count */
vsi->rss_size = max_qcount;
ret = i40e_vsi_config_rss(vsi); if (ret) {
dev_info(&vsi->back->pdev->dev, "Failed to reconfig rss for num_queues (%u)\n",
max_qcount); return ret;
}
vsi->reconfig_rss = true;
dev_dbg(&vsi->back->pdev->dev, "Reconfigured rss with num_queues (%u)\n", max_qcount);
/* Find queue count available for channel VSIs and starting offset * for channel VSIs
*/
override_q = vsi->mqprio_qopt.qopt.count[0]; if (override_q && override_q < vsi->num_queue_pairs) {
vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
vsi->next_base_queue = override_q;
} return 0;
}
/** * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc * @vsi: the VSI being setup * @ctxt: VSI context structure * @enabled_tc: Enabled TCs bitmap * @is_add: True if called before Add VSI * * Setup VSI queue mapping for enabled traffic classes.
**/ staticvoid i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add)
{ struct i40e_pf *pf = vsi->back;
u16 num_tc_qps = 0;
u16 sections = 0;
u8 netdev_tc = 0;
u16 numtc = 1;
u16 qcount;
u8 offset;
u16 qmap; int i;
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0; /* zero out queue mapping, it will get updated on the end of the function */
memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
if (vsi->type == I40E_VSI_MAIN) { /* This code helps add more queue to the VSI if we have * more cores than RSS can support, the higher cores will * be served by ATR or other filters. Furthermore, the * non-zero req_queue_pairs says that user requested a new * queue count via ethtool's set_channels, so use this * value for queues distribution across traffic classes * We need at least one queue pair for the interface * to be usable as we see in else statement.
*/ if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs; elseif (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_queue_pairs = pf->num_lan_msix; else
vsi->num_queue_pairs = 1;
}
/* Number of queues per enabled TC */ if (vsi->type == I40E_VSI_MAIN ||
(vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
num_tc_qps = vsi->num_queue_pairs; else
num_tc_qps = vsi->alloc_queue_pairs;
if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) /* TC is enabled */
numtc++;
} if (!numtc) {
dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
numtc = 1;
}
num_tc_qps = num_tc_qps / numtc;
num_tc_qps = min_t(int, num_tc_qps,
i40e_pf_get_max_q_per_tc(pf));
}
/* Do not allow use more TC queue pairs than MSI-X vectors exist */ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
/* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */ int pow, num_qps;
switch (vsi->type) { case I40E_VSI_MAIN: if ((!test_bit(I40E_FLAG_FD_SB_ENA,
pf->flags) &&
!test_bit(I40E_FLAG_FD_ATR_ENA,
pf->flags)) ||
vsi->tc_config.enabled_tc != 1) {
qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps); break;
}
fallthrough; case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: default:
qcount = num_tc_qps;
WARN_ON(i != 0); break;
}
vsi->tc_config.tc_info[i].qoffset = offset;
vsi->tc_config.tc_info[i].qcount = qcount;
/* find the next higher power-of-2 of num queue pairs */
num_qps = qcount;
pow = 0; while (num_qps && (BIT_ULL(pow) < qcount)) {
pow++;
num_qps >>= 1;
}
offset += qcount;
} else { /* TC is not enabled so set the offset to * default queue and allocate one queue * for the given TC.
*/
vsi->tc_config.tc_info[i].qoffset = 0;
vsi->tc_config.tc_info[i].qcount = 1;
vsi->tc_config.tc_info[i].netdev_tc = 0;
qmap = 0;
}
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
} /* Do not change previously set num_queue_pairs for PFs and VFs*/ if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
(vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
(vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
vsi->num_queue_pairs = offset;
/* Scheduler section valid can only be set for ADD VSI */ if (is_add) {
sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
/*
 * NOTE(review): the following German website disclaimer is extraction
 * residue and not part of the driver source; preserved verbatim but
 * wrapped in a comment so it cannot be parsed as code.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */