// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/net/intel/libie/rx.h>
#include <net/netdev_lock.h>

#include "iavf.h"
#include "iavf_ptp.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */

#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
 "Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
 "Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */

static const struct pci_device_id iavf_pci_tbl[] = {
 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
 /* required last entry */
 {0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_IMPORT_NS("LIBETH");
MODULE_IMPORT_NS("LIBIE");
MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;

/**
 * iavf_status_to_errno - convert iavf hardware status codes to kernel errnos
 * @status: status code returned by the shared admin-queue/HMC code
 *
 * Return: 0 on success, otherwise the negative errno that best matches the
 * failure class (-EINVAL for bad parameters, -EIO for hardware/firmware
 * errors, and so on). Any status code not listed maps to -EIO.
 */
int iavf_status_to_errno(enum iavf_status status)
{
 switch (status) {
 case IAVF_SUCCESS:
  return 0;
 /* parameter/validation failures -> -EINVAL */
 case IAVF_ERR_PARAM:
 case IAVF_ERR_MAC_TYPE:
 case IAVF_ERR_INVALID_MAC_ADDR:
 case IAVF_ERR_INVALID_LINK_SETTINGS:
 case IAVF_ERR_INVALID_PD_ID:
 case IAVF_ERR_INVALID_QP_ID:
 case IAVF_ERR_INVALID_CQ_ID:
 case IAVF_ERR_INVALID_CEQ_ID:
 case IAVF_ERR_INVALID_AEQ_ID:
 case IAVF_ERR_INVALID_SIZE:
 case IAVF_ERR_INVALID_ARP_INDEX:
 case IAVF_ERR_INVALID_FPM_FUNC_ID:
 case IAVF_ERR_QP_INVALID_MSG_SIZE:
 case IAVF_ERR_INVALID_FRAG_COUNT:
 case IAVF_ERR_INVALID_ALIGNMENT:
 case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
 case IAVF_ERR_INVALID_IMM_DATA_SIZE:
 case IAVF_ERR_INVALID_VF_ID:
 case IAVF_ERR_INVALID_HMCFN_ID:
 case IAVF_ERR_INVALID_PBLE_INDEX:
 case IAVF_ERR_INVALID_SD_INDEX:
 case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
 case IAVF_ERR_INVALID_SD_TYPE:
 case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
 case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
 case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
  return -EINVAL;
 /* hardware, firmware and admin-queue failures -> -EIO */
 case IAVF_ERR_NVM:
 case IAVF_ERR_NVM_CHECKSUM:
 case IAVF_ERR_PHY:
 case IAVF_ERR_CONFIG:
 case IAVF_ERR_UNKNOWN_PHY:
 case IAVF_ERR_LINK_SETUP:
 case IAVF_ERR_ADAPTER_STOPPED:
 case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
 case IAVF_ERR_AUTONEG_NOT_COMPLETE:
 case IAVF_ERR_RESET_FAILED:
 case IAVF_ERR_BAD_PTR:
 case IAVF_ERR_SWFW_SYNC:
 case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
 case IAVF_ERR_QUEUE_EMPTY:
 case IAVF_ERR_FLUSHED_QUEUE:
 case IAVF_ERR_OPCODE_MISMATCH:
 case IAVF_ERR_CQP_COMPL_ERROR:
 case IAVF_ERR_BACKING_PAGE_ERROR:
 case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
 case IAVF_ERR_MEMCPY_FAILED:
 case IAVF_ERR_SRQ_ENABLED:
 case IAVF_ERR_ADMIN_QUEUE_ERROR:
 case IAVF_ERR_ADMIN_QUEUE_FULL:
 case IAVF_ERR_BAD_RDMA_CQE:
 case IAVF_ERR_NVM_BLANK_MODE:
 case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
 case IAVF_ERR_DIAG_TEST_FAILED:
 case IAVF_ERR_FIRMWARE_API_VERSION:
 case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
  return -EIO;
 case IAVF_ERR_DEVICE_NOT_SUPPORTED:
  return -ENODEV;
 case IAVF_ERR_NO_AVAILABLE_VSI:
 case IAVF_ERR_RING_FULL:
  return -ENOSPC;
 case IAVF_ERR_NO_MEMORY:
  return -ENOMEM;
 case IAVF_ERR_TIMEOUT:
 case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
  return -ETIMEDOUT;
 case IAVF_ERR_NOT_IMPLEMENTED:
 case IAVF_NOT_SUPPORTED:
  return -EOPNOTSUPP;
 case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
  return -EALREADY;
 case IAVF_ERR_NOT_READY:
  return -EBUSY;
 case IAVF_ERR_BUF_TOO_SHORT:
  return -EMSGSIZE;
 }

 /* any status code not enumerated above is a generic I/O error */
 return -EIO;
}

/**
 * virtchnl_status_to_errno - convert virtchnl status codes to kernel errnos
 * @v_status: status code carried in a virtchnl message from the PF
 *
 * Return: 0 on success, otherwise a negative errno describing the failure.
 * Unrecognized status codes map to -EIO.
 */
int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
 switch (v_status) {
 case VIRTCHNL_STATUS_SUCCESS:
  return 0;
 case VIRTCHNL_STATUS_ERR_NO_MEMORY:
  return -ENOMEM;
 case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
  return -EOPNOTSUPP;
 case VIRTCHNL_STATUS_ERR_PARAM:
 case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
  return -EINVAL;
 case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
 case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
 case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
 default:
  return -EIO;
 }
}

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 *
 * Return: the adapter private data stored behind the netdev that was
 * registered as the PCI driver data.
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
 struct net_device *netdev = pci_get_drvdata(pdev);

 return netdev_priv(netdev);
}

/**
 * iavf_is_reset_in_progress - Check if a reset is in progress
 * @adapter: board private structure
 *
 * Return: true when the adapter is resetting or a reset has been
 * requested/detected but not yet handled, false otherwise.
 */
static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
{
 if (adapter->state == __IAVF_RESETTING)
  return true;

 return (adapter->flags & (IAVF_FLAG_RESET_PENDING |
      IAVF_FLAG_RESET_NEEDED)) != 0;
}

/**
 * iavf_wait_for_reset - Wait for reset to finish.
 * @adapter: board private structure
 *
 * Sleeps (interruptibly, up to 5 seconds) until no reset is in progress.
 *
 * Return: 0 if the reset finished, -EINTR if the wait was interrupted by a
 * signal, -EBUSY if it timed out.
 */
int iavf_wait_for_reset(struct iavf_adapter *adapter)
{
 int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
     !iavf_is_reset_in_progress(adapter),
     msecs_to_jiffies(5000));

 /* <0: interrupted by a signal, 0: timed out, >0: condition became true */
 if (ret < 0)
  return -EINTR;
 if (!ret)
  return -EBUSY;
 return 0;
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 *
 * Return: 0 on success, IAVF_ERR_PARAM for a NULL @mem,
 * IAVF_ERR_NO_MEMORY when the DMA allocation fails.
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
      struct iavf_dma_mem *mem,
      u64 size, u32 alignment)
{
 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

 if (!mem)
  return IAVF_ERR_PARAM;

 /* round the requested size up to the requested alignment */
 mem->size = ALIGN(size, alignment);
 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
         (dma_addr_t *)&mem->pa, GFP_KERNEL);

 return mem->va ? 0 : IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem - wrapper for DMA memory freeing
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 *
 * Return: 0 on success, IAVF_ERR_PARAM when @mem is NULL or holds no
 * allocation.
 **/
enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
 struct iavf_adapter *adapter;

 if (!mem || !mem->va)
  return IAVF_ERR_PARAM;

 adapter = (struct iavf_adapter *)hw->back;
 dma_free_coherent(&adapter->pdev->dev, mem->size,
     mem->va, (dma_addr_t)mem->pa);
 return 0;
}

/**
 * iavf_allocate_virt_mem - virt memory alloc wrapper
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 *
 * Return: 0 on success, IAVF_ERR_PARAM for a NULL @mem,
 * IAVF_ERR_NO_MEMORY when the allocation fails.
 **/
enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
     struct iavf_virt_mem *mem, u32 size)
{
 if (!mem)
  return IAVF_ERR_PARAM;

 mem->size = size;
 mem->va = kzalloc(size, GFP_KERNEL);

 return mem->va ? 0 : IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem - virt memory free wrapper
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free; must be non-NULL (mem->va may be NULL,
 *        kfree(NULL) is a no-op)
 **/
void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
 kfree(mem->va);
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
 *
 * Queues the reset task on the adapter's workqueue unless the driver is
 * being removed or a reset is already pending/needed (in which case the
 * request is silently dropped and @flags is NOT recorded).
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
{
 if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
     !(adapter->flags &
     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
  adapter->flags |= flags;
  queue_work(adapter->wq, &adapter->reset_task);
 }
}

/**
 * iavf_schedule_aq_request - Set the flags and schedule aq request
 * @adapter: board private structure
 * @flags: requested aq flags (IAVF_FLAG_AQ_*)
 *
 * Records the requested admin-queue operation(s) in aq_required and kicks
 * the watchdog task to run immediately so the request is processed without
 * waiting for the next watchdog interval.
 **/
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
 adapter->aq_required |= flags;
 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 *
 * Called by the stack when a Tx queue appears hung; counts the event and
 * requests a full reset to recover the hardware.
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);

 adapter->tx_timeout_count++;
 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks the misc/admin-queue interrupt (vector 0) and waits for any
 * in-flight handler to finish. No-op if MSI-X was never set up.
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;

 if (!adapter->msix_entries)
  return;

 wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

 /* flush the register write before waiting on the handler */
 iavf_flush(hw);

 synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Re-arms the misc interrupt (vector 0) and enables the admin-queue cause
 * so mailbox events from the PF raise an interrupt again.
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;

 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
           IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

 /* make sure the writes reach the device */
 iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks every traffic (queue) vector and waits for their handlers to
 * finish; vector 0 (misc) is left alone. No-op without MSI-X.
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;

 if (!adapter->msix_entries)
  return;

 /* vector 0 is the misc/adminq vector; queue vectors start at 1 */
 for (int i = 1; i < adapter->num_msix_vectors; i++) {
  wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
  synchronize_irq(adapter->msix_entries[i].vector);
 }
 iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for all queues
 * @adapter: board private structure
 *
 * Re-arms every traffic (queue) interrupt vector; the misc vector 0 is
 * handled separately by iavf_misc_irq_enable().
 **/
static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;

 /* queue vectors start at index 1; register indices start at 0 */
 for (int i = 1; i < adapter->num_msix_vectors; i++)
  wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
       IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: whether to flush the register writes to the device
 *
 * Enables both the misc (admin-queue) vector and all queue vectors.
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
 iavf_misc_irq_enable(adapter);
 iavf_irq_enable_queues(adapter);

 if (flush)
  iavf_flush(&adapter->hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 *
 * Acknowledges the misc interrupt causes and defers admin-queue processing
 * to the adminq work item.
 *
 * Return: IRQ_HANDLED always.
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
 struct net_device *netdev = data;
 struct iavf_adapter *adapter = netdev_priv(netdev);
 struct iavf_hw *hw = &adapter->hw;

 /* handle non-queue interrupts, these reads clear the registers */
 rd32(hw, IAVF_VFINT_ICR01);
 rd32(hw, IAVF_VFINT_ICR0_ENA1);

 /* don't queue new work while the driver is being torn down */
 if (adapter->state != __IAVF_REMOVE)
  /* schedule work on the private workqueue */
  queue_work(adapter->wq, &adapter->adminq_task);

 return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * Schedules NAPI for the vector; vectors with no rings attached are
 * acknowledged without scheduling.
 *
 * Return: IRQ_HANDLED always.
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
 struct iavf_q_vector *q_vector = data;

 if (q_vector->tx.ring || q_vector->rx.ring)
  napi_schedule_irqoff(&q_vector->napi);

 return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/

static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
 struct iavf_hw *hw = &adapter->hw;

 rx_ring->q_vector = q_vector;
 rx_ring->next = q_vector->rx.ring;
 rx_ring->vsi = &adapter->vsi;
 q_vector->rx.ring = rx_ring;
 q_vector->rx.count++;
 q_vector->rx.next_update = jiffies + 1;
 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
 q_vector->ring_mask |= BIT(r_idx);
 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
      q_vector->rx.current_itr >> 1);
 q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 *
 * Links Tx ring @t_idx into vector @v_idx's ring list and programs the
 * vector's Tx ITR register with the ring's configured interrupt rate.
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
 struct iavf_hw *hw = &adapter->hw;

 /* push the ring onto the head of the vector's Tx ring list */
 tx_ring->q_vector = q_vector;
 tx_ring->next = q_vector->tx.ring;
 tx_ring->vsi = &adapter->vsi;
 q_vector->tx.ring = tx_ring;
 q_vector->tx.count++;
 q_vector->tx.next_update = jiffies + 1;
 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
 q_vector->num_ringpairs++;
 /* program the new target ITR and mark it as the current setting */
 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
      q_vector->tx.target_itr >> 1);
 q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
 int vidx = 0;

 for (int ridx = 0; ridx < adapter->num_active_queues; ridx++) {
  iavf_map_vector_to_rxq(adapter, vidx, ridx);
  iavf_map_vector_to_txq(adapter, vidx, ridx);

  /* round-robin over the vectors so that, when queues
   * outnumber vectors, every queue still gets mapped
   */
  vidx = (vidx + 1) % q_vectors;
 }

 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel. IRQ names encode which rings the vector
 * serves (TxRx/rx/tx) for readability in /proc/interrupts.
 *
 * Return: 0 on success, negative errno from request_irq() on failure
 * (all previously requested traffic IRQs are freed on the error path).
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
 unsigned int vector, q_vectors;
 unsigned int rx_int_idx = 0, tx_int_idx = 0;
 int irq_num, err;

 iavf_irq_disable(adapter);
 /* Decrement for Other and TCP Timer vectors */
 q_vectors = adapter->num_msix_vectors - NONQ_VECS;

 for (vector = 0; vector < q_vectors; vector++) {
  struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

  /* traffic vectors start after the NONQ_VECS misc vectors */
  irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

  if (q_vector->tx.ring && q_vector->rx.ring) {
   snprintf(q_vector->name, sizeof(q_vector->name),
     "iavf-%s-TxRx-%u", basename, rx_int_idx++);
   tx_int_idx++;
  } else if (q_vector->rx.ring) {
   snprintf(q_vector->name, sizeof(q_vector->name),
     "iavf-%s-rx-%u", basename, rx_int_idx++);
  } else if (q_vector->tx.ring) {
   snprintf(q_vector->name, sizeof(q_vector->name),
     "iavf-%s-tx-%u", basename, tx_int_idx++);
  } else {
   /* skip this unused q_vector */
   continue;
  }
  err = request_irq(irq_num,
      iavf_msix_clean_rings,
      0,
      q_vector->name,
      q_vector);
  if (err) {
   dev_info(&adapter->pdev->dev,
     "Request_irq failed, error: %d\n", err);
   goto free_queue_irqs;
  }
 }

 return 0;

free_queue_irqs:
 /* unwind: free every IRQ requested before the failing one */
 while (vector) {
  vector--;
  irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
  free_irq(irq_num, &adapter->q_vectors[vector]);
 }
 return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 *
 * Return: 0 on success, negative errno from request_irq() on failure.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
 struct net_device *netdev = adapter->netdev;
 int err;

 snprintf(adapter->misc_vector_name,
   sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
   dev_name(&adapter->pdev->dev));
 err = request_irq(adapter->msix_entries[0].vector,
     &iavf_msix_aq, 0,
     adapter->misc_vector_name, netdev);
 /* Do not call free_irq() here on failure: the IRQ was never
  * acquired, and freeing it would trigger a genirq warning
  * ("Trying to free already-free IRQ").
  */
 if (err)
  dev_err(&adapter->pdev->dev,
   "request_irq for %s failed: %d\n",
   adapter->misc_vector_name, err);
 return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0 and clears the NAPI/IRQ linkage
 * for each queue vector. No-op if MSI-X was never set up.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
 struct iavf_q_vector *q_vector;
 int vector, irq_num, q_vectors;

 if (!adapter->msix_entries)
  return;

 q_vectors = adapter->num_msix_vectors - NONQ_VECS;

 for (vector = 0; vector < q_vectors; vector++) {
  q_vector = &adapter->q_vectors[vector];
  /* detach the IRQ from NAPI before actually freeing it */
  netif_napi_set_irq_locked(&q_vector->napi, -1);
  irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
  free_irq(irq_num, q_vector);
 }
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0 (admin queue). No-op if MSI-X was never set up.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
 if (!adapter->msix_entries)
  return;

 free_irq(adapter->msix_entries[0].vector, adapter->netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void iavf_configure_tx(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;
 int i;

 for (i = 0; i < adapter->num_active_queues; i++)
  adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_select_rx_desc_format - Select Rx descriptor format
 * @adapter: adapter private structure
 *
 * Select what Rx descriptor format based on availability and enabled
 * features.
 *
 * Return: the desired RXDID to select for a given Rx queue, as defined by
 *         enum virtchnl_rxdid_format.
 */
static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
{
 u64 rxdids = adapter->supp_rxdids;

 /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
  * stick with the default value of the legacy 32 byte format.
  */
 if (!IAVF_RXDID_ALLOWED(adapter))
  return VIRTCHNL_RXDID_1_32B_BASE;

 /* Rx timestamping requires the use of flexible NIC descriptors */
 if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
  if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
   return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;

  /* fall through to the legacy format, but let the user know
   * Rx timestamping cannot work without the flex descriptor
   */
  pci_warn(adapter->pdev,
    "Unable to negotiate flexible descriptor format\n");
 }

 /* Warn if the PF does not list support for the default legacy
  * descriptor format. This shouldn't happen, as this is the format
  * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
  * likely caused by a bug in the PF implementation failing to indicate
  * support for the format.
  */
 if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
  netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");

 return VIRTCHNL_RXDID_1_32B_BASE;
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Selects the Rx descriptor format and points each Rx ring's tail at its
 * queue's doorbell register.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;

 /* one descriptor format is chosen for all rings */
 adapter->rxdid = iavf_select_rx_desc_format(adapter);

 for (u32 i = 0; i < adapter->num_active_queues; i++) {
  adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
  adapter->rx_rings[i].rxdid = adapter->rxdid;
 }
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
     struct iavf_vlan vlan)
{
 struct iavf_vlan_filter *f;

 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
  /* both the VLAN id and the tag protocol must match */
  if (f->vlan.vid != vlan.vid || f->vlan.tpid != vlan.tpid)
   continue;
  return f;
 }

 return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Takes mac_vlan_list_lock; do not call with it held.
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
    struct iavf_vlan vlan)
{
 struct iavf_vlan_filter *f = NULL;

 spin_lock_bh(&adapter->mac_vlan_list_lock);

 f = iavf_find_vlan(adapter, vlan);
 if (!f) {
  /* GFP_ATOMIC: allocating under a BH spinlock */
  f = kzalloc(sizeof(*f), GFP_ATOMIC);
  if (!f)
   goto clearout;

  f->vlan = vlan;

  list_add_tail(&f->list, &adapter->vlan_filter_list);
  f->state = IAVF_VLAN_ADD;
  adapter->num_vlan_filters++;
  iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
 } else if (f->state == IAVF_VLAN_REMOVE) {
  /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
   * We can safely only change the state here.
   */
  f->state = IAVF_VLAN_ACTIVE;
 }

clearout:
 spin_unlock_bh(&adapter->mac_vlan_list_lock);
 return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Takes mac_vlan_list_lock; do not call with it held. A filter that was
 * never sent to the PF is dropped immediately; otherwise its removal is
 * queued for the PF via the admin queue.
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
 struct iavf_vlan_filter *f;

 spin_lock_bh(&adapter->mac_vlan_list_lock);

 f = iavf_find_vlan(adapter, vlan);
 if (f) {
  /* IAVF_VLAN_ADD means that VLAN wasn't even added yet.
   * Remove it from the list.
   */
  if (f->state == IAVF_VLAN_ADD) {
   list_del(&f->list);
   kfree(f);
   adapter->num_vlan_filters--;
  } else {
   f->state = IAVF_VLAN_REMOVE;
   iavf_schedule_aq_request(adapter,
       IAVF_FLAG_AQ_DEL_VLAN_FILTER);
  }
 }

 spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/

static void iavf_restore_filters(struct iavf_adapter *adapter)
{
 struct iavf_vlan_filter *f;

 /* re-add all VLAN filters */
 spin_lock_bh(&adapter->mac_vlan_list_lock);

 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
  if (f->state == IAVF_VLAN_INACTIVE)
   f->state = IAVF_VLAN_ADD;
 }

 spin_unlock_bh(&adapter->mac_vlan_list_lock);
 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 *
 * Return: current count of tracked VLAN filters.
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
 return adapter->num_vlan_filters;
}

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 *
 * Return: the filter limit, or 0 when no VLAN offload was negotiated.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
 /* legacy VLAN offload has never had a VF-side limit */
 if (VLAN_ALLOWED(adapter))
  return VLAN_N_VID;

 if (VLAN_V2_ALLOWED(adapter))
  return adapter->vlan_v2_caps.filtering.max_filters;

 return 0;
}

/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 *
 * Return: true when the tracked VLAN filter count has reached the
 * negotiated limit, false otherwise.
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
 return iavf_get_num_vlans_added(adapter) >=
        iavf_get_max_vlans_allowed(adapter);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/

static int iavf_vlan_rx_add_vid(struct net_device *netdev,
    __always_unused __be16 proto, u16 vid)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);

 /* Do not track VLAN 0 filter, always added by the PF on VF init */
 if (!vid)
  return 0;

 if (!VLAN_FILTERING_ALLOWED(adapter))
  return -EIO;

 if (iavf_max_vlans_added(adapter)) {
  netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
      iavf_get_max_vlans_allowed(adapter));
  return -EIO;
 }

 if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
  return -ENOMEM;

 return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/

static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
     __always_unused __be16 proto, u16 vid)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);

 /* We do not track VLAN 0 filter */
 if (!vid)
  return 0;

 iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
 return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/

static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
      const u8 *macaddr)
{
 struct iavf_mac_filter *f;

 if (!macaddr)
  return NULL;

 list_for_each_entry(f, &adapter->mac_filter_list, list) {
  if (ether_addr_equal(macaddr, f->macaddr))
   return f;
 }
 return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Must be called while holding the mac_vlan_list_lock (uses GFP_ATOMIC).
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
     const u8 *macaddr)
{
 struct iavf_mac_filter *f;

 if (!macaddr)
  return NULL;

 f = iavf_find_filter(adapter, macaddr);
 if (!f) {
  f = kzalloc(sizeof(*f), GFP_ATOMIC);
  if (!f)
   return f;

  ether_addr_copy(f->macaddr, macaddr);

  list_add_tail(&f->list, &adapter->mac_filter_list);
  f->add = true;
  f->add_handled = false;
  f->is_new_mac = true;
  f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
  adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
 } else {
  /* filter already exists; cancel any pending removal */
  f->remove = false;
 }

 return f;
}

/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 **/
static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
        const u8 *new_mac)
{
 struct iavf_hw *hw = &adapter->hw;
 struct iavf_mac_filter *new_f;
 struct iavf_mac_filter *old_f;

 spin_lock_bh(&adapter->mac_vlan_list_lock);

 new_f = iavf_add_filter(adapter, new_mac);
 if (!new_f) {
  spin_unlock_bh(&adapter->mac_vlan_list_lock);
  return -ENOMEM;
 }

 /* queue the old primary filter (if any) for removal on the PF */
 old_f = iavf_find_filter(adapter, hw->mac.addr);
 if (old_f) {
  old_f->is_primary = false;
  old_f->remove = true;
  adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
 }
 /* Always send the request to add if changing primary MAC,
  * even if filter is already present on the list
  */
 new_f->is_primary = true;
 new_f->add = true;
 ether_addr_copy(hw->mac.addr, new_mac);

 spin_unlock_bh(&adapter->mac_vlan_list_lock);

 /* schedule the watchdog task to immediately process the request */
 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
 return 0;
}

/**
 * iavf_is_mac_set_handled - wait for a response to set MAC from PF
 * @netdev: network interface device structure
 * @macaddr: MAC address to set
 *
 * Returns true when the PF has answered the add request for @macaddr (or
 * the filter no longer exists), false while the request is still pending.
 */
static bool iavf_is_mac_set_handled(struct net_device *netdev,
        const u8 *macaddr)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);
 struct iavf_mac_filter *f;
 bool handled;

 spin_lock_bh(&adapter->mac_vlan_list_lock);
 f = iavf_find_filter(adapter, macaddr);
 handled = !f || (!f->add && f->add_handled);
 spin_unlock_bh(&adapter->mac_vlan_list_lock);

 return handled;
}

/**
 * iavf_set_mac - NDO callback to set port MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure (struct sockaddr)
 *
 * Sends the new MAC to the PF and waits (up to 2.5s) for the PF's reply.
 *
 * Returns 0 on success, negative on failure (-EAGAIN on timeout, -EACCES
 * when the PF rejected the address).
 */
static int iavf_set_mac(struct net_device *netdev, void *p)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);
 struct sockaddr *addr = p;
 int ret;

 if (!is_valid_ether_addr(addr->sa_data))
  return -EADDRNOTAVAIL;

 ret = iavf_replace_primary_mac(adapter, addr->sa_data);

 if (ret)
  return ret;

 ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
            iavf_is_mac_set_handled(netdev, addr->sa_data),
            msecs_to_jiffies(2500));

 /* If ret < 0 then it means wait was interrupted.
  * If ret == 0 then it means we got a timeout.
  * else it means we got response for set MAC from PF,
  * check if netdev MAC was updated to requested MAC,
  * if yes then set MAC succeeded otherwise it failed return -EACCES
  */
 if (ret < 0)
  return ret;

 if (!ret)
  return -EAGAIN;

 if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
  return -EACCES;

 return 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 *
 * Return: 0 on success, -ENOMEM when the filter cannot be allocated.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);

 return iavf_add_filter(adapter, addr) ? 0 : -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 *
 * Return: 0 always.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);
 struct iavf_mac_filter *f;

 /* Under some circumstances, we might receive a request to delete
  * our own device address from our uc list. Because we store the
  * device address in the VSI's MAC/VLAN filter list, we need to ignore
  * such requests and not delete our device address from this list.
  */
 if (ether_addr_equal(addr, netdev->dev_addr))
  return 0;

 /* mark the filter for removal; the watchdog sends it to the PF */
 f = iavf_find_filter(adapter, addr);
 if (f) {
  f->remove = true;
  adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
 }
 return 0;
}

/**
 * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
 * @adapter: device specific adapter
 *
 * Return: true when the netdev's IFF_PROMISC/IFF_ALLMULTI bits differ from
 * the promiscuous configuration last pushed to the PF.
 */
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
{
 return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
  (IFF_PROMISC | IFF_ALLMULTI);
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Syncs the netdev's unicast/multicast lists into the driver's MAC filter
 * list and flags a promiscuous-mode update for the watchdog if the
 * IFF_PROMISC/IFF_ALLMULTI bits changed.
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
 struct iavf_adapter *adapter = netdev_priv(netdev);

 /* sync callbacks (iavf_addr_sync/unsync) expect this lock held */
 spin_lock_bh(&adapter->mac_vlan_list_lock);
 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
 spin_unlock_bh(&adapter->mac_vlan_list_lock);

 spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
 if (iavf_promiscuous_mode_changed(adapter))
  adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
 spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/

static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
 int num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
 int v;

 for (v = 0; v < num_q_vectors; v++)
  napi_enable_locked(&adapter->q_vectors[v].napi);
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/

static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
 int num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
 int v;

 for (v = 0; v < num_q_vectors; v++)
  napi_disable_locked(&adapter->q_vectors[v].napi);
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 *
 * Programs the RX filters, configures the Tx/Rx rings, requests queue
 * configuration from the PF and pre-fills the Rx rings with buffers.
 **/

static void iavf_configure(struct iavf_adapter *adapter)
{
 int q;

 iavf_set_rx_mode(adapter->netdev);

 iavf_configure_tx(adapter);
 iavf_configure_rx(adapter);
 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

 /* give every Rx ring a full complement of buffers */
 for (q = 0; q < adapter->num_active_queues; q++) {
  struct iavf_ring *rx_ring = &adapter->rx_rings[q];

  iavf_alloc_rx_buffers(rx_ring, IAVF_DESC_UNUSED(rx_ring));
 }
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Moves the adapter to __IAVF_RUNNING, enables NAPI and asks the PF to
 * enable the queues. Caller must hold the netdev lock.
 */

static void iavf_up_complete(struct iavf_adapter *adapter)
{
 netdev_assert_locked(adapter->netdev);

 /* mark the adapter running and the VSI up before enabling NAPI */
 iavf_change_state(adapter, __IAVF_RUNNING);
 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

 iavf_napi_enable_all(adapter);

 /* queue enabling is requested via virtchnl and completes later */
 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
}

/**
 * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
 * yet and mark other to be removed.
 * @adapter: board private structure
 **/

static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
 struct iavf_vlan_filter *vlan_f, *vlan_tmp;
 struct iavf_mac_filter *mac_f, *mac_tmp;

 spin_lock_bh(&adapter->mac_vlan_list_lock);

 /* drop the sync state so a later .set_rx_mode re-syncs everything */
 __dev_uc_unsync(adapter->netdev, NULL);
 __dev_mc_unsync(adapter->netdev, NULL);

 /* MAC filters never sent to the PF can be freed outright; the rest
  * are flagged for deletion via virtchnl.
  */
 list_for_each_entry_safe(mac_f, mac_tmp, &adapter->mac_filter_list,
     list) {
  if (!mac_f->add) {
   mac_f->remove = true;
   continue;
  }
  list_del(&mac_f->list);
  kfree(mac_f);
 }

 /* VLAN filters are only disabled, never deleted here */
 list_for_each_entry_safe(vlan_f, vlan_tmp, &adapter->vlan_filter_list,
     list)
  vlan_f->state = IAVF_VLAN_DISABLE;

 spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
 * mark other to be removed.
 * @adapter: board private structure
 **/

static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
{
 struct iavf_cloud_filter *cf, *tmp;

 spin_lock_bh(&adapter->cloud_filter_list_lock);
 list_for_each_entry_safe(cf, tmp, &adapter->cloud_filter_list, list) {
  if (!cf->add) {
   /* already known to the PF: request deletion */
   cf->del = true;
   continue;
  }
  /* never sent to the PF: free immediately */
  list_del(&cf->list);
  kfree(cf);
  adapter->num_cloud_filters--;
 }
 spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
 * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/

static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
 struct iavf_fdir_fltr *fdir;

 spin_lock_bh(&adapter->fdir_fltr_lock);
 list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
  switch (fdir->state) {
  case IAVF_FDIR_FLTR_ADD_REQUEST:
   /* not yet sent to the PF: just cancel the request and
    * keep the filter inactive
    */
   fdir->state = IAVF_FDIR_FLTR_INACTIVE;
   break;
  case IAVF_FDIR_FLTR_ADD_PENDING:
  case IAVF_FDIR_FLTR_ACTIVE:
   /* active, or the PF has already seen the add request:
    * ask for the filter to be disabled
    */
   fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
   break;
  default:
   break;
  }
 }
 spin_unlock_bh(&adapter->fdir_fltr_lock);
}

/**
 * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
 * other to be removed.
 * @adapter: board private structure
 **/

static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
{
 struct iavf_adv_rss *rss, *tmp;

 spin_lock_bh(&adapter->adv_rss_lock);
 list_for_each_entry_safe(rss, tmp, &adapter->adv_rss_list_head, list) {
  if (rss->state != IAVF_ADV_RSS_ADD_REQUEST) {
   /* known to the PF: request deletion */
   rss->state = IAVF_ADV_RSS_DEL_REQUEST;
   continue;
  }
  /* never sent to the PF: free immediately */
  list_del(&rss->list);
  kfree(rss);
 }
 spin_unlock_bh(&adapter->adv_rss_lock);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Stops the data path (carrier, Tx, NAPI, IRQs), marks all filters for
 * removal or disable, and schedules the virtchnl requests that tear down the
 * HW state. Caller must hold the netdev lock.
 */

void iavf_down(struct iavf_adapter *adapter)
{
 struct net_device *netdev = adapter->netdev;

 netdev_assert_locked(netdev);

 /* nothing to do if the interface is already down or going down */
 if (adapter->state <= __IAVF_DOWN_PENDING)
  return;

 netif_carrier_off(netdev);
 netif_tx_disable(netdev);
 adapter->link_up = false;
 iavf_napi_disable_all(adapter);
 iavf_irq_disable(adapter);

 iavf_clear_mac_vlan_filters(adapter);
 iavf_clear_cloud_filters(adapter);
 iavf_clear_fdir_filters(adapter);
 iavf_clear_adv_rss_conf(adapter);

 /* if PF communication failed there is nobody to notify */
 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
  return;

 if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
  /* cancel any current operation */
  adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  /* Schedule operations to close down the HW. Don't wait
   * here for this to complete. The watchdog is still running
   * and it will take care of this.
   */

  if (!list_empty(&adapter->mac_filter_list))
   adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
  if (!list_empty(&adapter->vlan_filter_list))
   adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
  if (!list_empty(&adapter->cloud_filter_list))
   adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
  if (!list_empty(&adapter->fdir_list_head))
   adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
  if (!list_empty(&adapter->adv_rss_list_head))
   adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
 }

 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/

static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
 int got;

 /* We need at least MIN_MSIX_COUNT (3) vectors:
  *  0) Other (Admin Queue and link, mostly)
  *  1) TxQ[0] Cleanup
  *  2) RxQ[0] Cleanup
  * Any vectors beyond that are assigned to Tx/Rx queue cleanup later,
  * while requesting the irqs; here we only care how many we get.
  */
 got = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
        MIN_MSIX_COUNT, vectors);
 if (got < 0) {
  dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
  kfree(adapter->msix_entries);
  adapter->msix_entries = NULL;
  return got;
 }

 /* record how many vectors the OS actually granted */
 adapter->num_msix_vectors = got;
 return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/

static void iavf_free_queues(struct iavf_adapter *adapter)
{
 if (!adapter->vsi_res)
  return;

 adapter->num_active_queues = 0;

 kfree(adapter->tx_rings);
 kfree(adapter->rx_rings);
 adapter->tx_rings = NULL;
 adapter->rx_rings = NULL;
}

/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location in the ring specific
 * flags.
 */

void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
 int i;

 for (i = 0; i < adapter->num_active_queues; i++) {
  struct iavf_ring *tx_ring = &adapter->tx_rings[i];
  struct iavf_ring *rx_ring = &adapter->rx_rings[i];

  /* prevent multiple L2TAG bits being set after VFR */
  tx_ring->flags &=
   ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
     IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
  rx_ring->flags &=
   ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
     IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

  if (VLAN_ALLOWED(adapter)) {
   /* legacy VLAN offload: tag always in L2TAG1 */
   tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
   rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
  } else if (VLAN_V2_ALLOWED(adapter)) {
   struct virtchnl_vlan_supported_caps *stripping_support;
   struct virtchnl_vlan_supported_caps *insertion_support;

   stripping_support =
    &adapter->vlan_v2_caps.offloads.stripping_support;
   insertion_support =
    &adapter->vlan_v2_caps.offloads.insertion_support;

   /* outer-tag capabilities take precedence over inner
    * for both stripping and insertion below
    */
   if (stripping_support->outer) {
    if (stripping_support->outer &
        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
     rx_ring->flags |=
      IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
    else if (stripping_support->outer &
      VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
     rx_ring->flags |=
      IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
   } else if (stripping_support->inner) {
    if (stripping_support->inner &
        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
     rx_ring->flags |=
      IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
    else if (stripping_support->inner &
      VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
     rx_ring->flags |=
      IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
   }

   if (insertion_support->outer) {
    if (insertion_support->outer &
        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
     tx_ring->flags |=
      IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
    else if (insertion_support->outer &
      VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
     tx_ring->flags |=
      IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
   } else if (insertion_support->inner) {
    if (insertion_support->inner &
        VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
     tx_ring->flags |=
      IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
    else if (insertion_support->inner &
      VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
     tx_ring->flags |=
      IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
   }
  }
 }
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
 int i, num_active_queues;

 /* If we're in reset reallocating queues we don't actually know yet for
 * certain the PF gave us the number of queues we asked for but we'll
 * assume it did.  Once basic reset is finished we'll confirm once we
 * start negotiating config with PF.
 */

 if (adapter->num_req_queues)
  num_active_queues = adapter->num_req_queues;
 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
   adapter->num_tc)
  num_active_queues = adapter->ch_config.total_qps;
 else
  num_active_queues = min_t(int,
       adapter->vsi_res->num_queue_pairs,
       (int)(num_online_cpus()));


 adapter->tx_rings = kcalloc(num_active_queues,
        sizeof(struct iavf_ring), GFP_KERNEL);
 if (!adapter->tx_rings)
  goto err_out;
 adapter->rx_rings = kcalloc(num_active_queues,
        sizeof(struct iavf_ring), GFP_KERNEL);
 if (!adapter->rx_rings)
  goto err_out;

 for (i = 0; i < num_active_queues; i++) {
  struct iavf_ring *tx_ring;
  struct iavf_ring *rx_ring;

  tx_ring = &adapter->tx_rings[i];

  tx_ring->queue_index = i;
  tx_ring->netdev = adapter->netdev;
  tx_ring->dev = &adapter->pdev->dev;
  tx_ring->count = adapter->tx_desc_count;
  tx_ring->itr_setting = IAVF_ITR_TX_DEF;
  if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
   tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

  rx_ring = &adapter->rx_rings[i];
  rx_ring->queue_index = i;
  rx_ring->netdev = adapter->netdev;
  rx_ring->count = adapter->rx_desc_count;
  rx_ring->itr_setting = IAVF_ITR_RX_DEF;
 }

 adapter->num_active_queues = num_active_queues;

 iavf_set_queue_vlan_tag_loc(adapter);

 return 0;

err_out:
 iavf_free_queues(adapter);
 return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/

static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
 int vector, v_budget;
 int pairs = 0;
 int err = 0;

 if (!adapter->vsi_res) {
  err = -EIO;
  goto out;
 }
 pairs = adapter->num_active_queues;

 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
 * us much good if we have more vectors than CPUs. However, we already
 * limit the total number of queues by the number of CPUs so we do not
 * need any further limiting here.
 */

 v_budget = min_t(int, pairs + NONQ_VECS,
    (int)adapter->vf_res->max_vectors);

 adapter->msix_entries = kcalloc(v_budget,
     sizeof(struct msix_entry), GFP_KERNEL);
 if (!adapter->msix_entries) {
  err = -ENOMEM;
  goto out;
 }

 for (vector = 0; vector < v_budget; vector++)
  adapter->msix_entries[vector].entry = vector;

 err = iavf_acquire_msix_vectors(adapter, v_budget);
 if (!err)
  iavf_schedule_finish_config(adapter);

out:
 return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
 struct iavf_aqc_get_set_rss_key_data *rss_key =
  (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
 struct iavf_hw *hw = &adapter->hw;
 enum iavf_status status;

 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  /* bail because we already have a command pending */
  dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
   adapter->current_op);
  return -EBUSY;
 }

 status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
 if (status) {
  dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
   iavf_stat_str(hw, status),
   libie_aq_str(hw->aq.asq_last_status));
  return iavf_status_to_errno(status);

 }

 status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
         adapter->rss_lut, adapter->rss_lut_size);
 if (status) {
  dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
   iavf_stat_str(hw, status),
   libie_aq_str(hw->aq.asq_last_status));
  return iavf_status_to_errno(status);
 }

 return 0;

}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Programs the RSS hash key and lookup table directly through the VF
 * registers.
 *
 * Returns 0 on success, negative on failure
 **/

static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;
 u32 *dw;
 u16 i;

 /* rss_key and rss_lut hold exactly rss_key_size / rss_lut_size bytes,
  * i.e. size/4 dwords. Iterating with "<=" here would read one dword
  * past the end of each buffer and program one register too many, so
  * use a strict "<" bound.
  */
 dw = (u32 *)adapter->rss_key;
 for (i = 0; i < adapter->rss_key_size / 4; i++)
  wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

 dw = (u32 *)adapter->rss_lut;
 for (i = 0; i < adapter->rss_lut_size / 4; i++)
  wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

 /* flush posted register writes */
 iavf_flush(hw);

 return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int iavf_config_rss(struct iavf_adapter *adapter)
{
 if (RSS_PF(adapter)) {
  /* the PF programs key/lut for us; just queue the requests */
  adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
     IAVF_FLAG_AQ_SET_RSS_KEY;
  return 0;
 }

 /* otherwise program via the admin queue or direct registers */
 return RSS_AQ(adapter) ? iavf_config_rss_aq(adapter) :
     iavf_config_rss_reg(adapter);
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 *
 * Spreads the active queue indices round-robin across the whole table.
 **/

static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
 u16 idx;

 for (idx = 0; idx < adapter->rss_lut_size; idx++)
  adapter->rss_lut[idx] = idx % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

static int iavf_init_rss(struct iavf_adapter *adapter)
{
 struct iavf_hw *hw = &adapter->hw;

 if (!RSS_PF(adapter)) {
  /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
  if (adapter->vf_res->vf_cap_flags &
      VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
   adapter->rss_hashcfg =
    IAVF_DEFAULT_RSS_HASHCFG_EXPANDED;
  else
   adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG;

  wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg);
  wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32));
 }

 iavf_fill_rss_lut(adapter);
 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);

 return iavf_config_rss(adapter);
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/

static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
 int q_idx = 0, num_q_vectors, irq_num;
 struct iavf_q_vector *q_vector;

 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
         GFP_KERNEL);
 if (!adapter->q_vectors)
  return -ENOMEM;

 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
  irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
  q_vector = &adapter->q_vectors[q_idx];
  q_vector->adapter = adapter;
  q_vector->vsi = &adapter->vsi;
  q_vector->v_idx = q_idx;
  q_vector->reg_idx = q_idx;
  netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
          iavf_napi_poll, q_idx);
  netif_napi_set_irq_locked(&q_vector->napi, irq_num);
 }

 return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/

static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
 int num_q_vectors, idx;

 if (!adapter->q_vectors)
  return;

 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
 for (idx = 0; idx < num_q_vectors; idx++)
  netif_napi_del_locked(&adapter->q_vectors[idx].napi);

 kfree(adapter->q_vectors);
 adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 * Disables MSI-X and frees the msix_entries table; a no-op if MSI-X was
 * never set up (msix_entries == NULL).
 **/

static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
 if (!adapter->msix_entries)
  return;

 pci_disable_msix(adapter->pdev);
 kfree(adapter->msix_entries);
 /* NULL marks MSI-X as torn down for subsequent calls */
 adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 * Allocates the Tx/Rx rings, acquires MSI-X vectors and allocates the
 * per-vector q_vector structures. On failure each step is unwound via the
 * goto chain below.
 **/

static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
 int err;

 err = iavf_alloc_queues(adapter);
 if (err) {
  dev_err(&adapter->pdev->dev,
   "Unable to allocate memory for queues\n");
  goto err_alloc_queues;
 }

 err = iavf_set_interrupt_capability(adapter);
 if (err) {
  dev_err(&adapter->pdev->dev,
   "Unable to setup interrupt capabilities\n");
  goto err_set_interrupt;
 }

 err = iavf_alloc_q_vectors(adapter);
 if (err) {
  dev_err(&adapter->pdev->dev,
   "Unable to allocate memory for queue vectors\n");
  goto err_alloc_q_vectors;
 }

 /* If we've made it so far while ADq flag being ON, then we haven't
  * bailed out anywhere in middle. And ADq isn't just enabled but actual
  * resources have been allocated in the reset path.
  * Now we can truly claim that ADq is enabled.
  */
 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
     adapter->num_tc)
  dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
    adapter->num_tc);

 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
   (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
   adapter->num_active_queues);

 return 0;
err_alloc_q_vectors:
 iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
 iavf_free_queues(adapter);
err_alloc_queues:
 return err;
}

/**
 * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does
 * @adapter: board private structure
 *
 * Tears down in reverse order of setup: q_vectors first, then MSI-X,
 * then the rings.
 **/

static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter)
{
 iavf_free_q_vectors(adapter);
 iavf_reset_interrupt_capability(adapter);
 iavf_free_queues(adapter);
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/

static void iavf_free_rss(struct iavf_adapter *adapter)
{
 /* kfree(NULL) is a no-op, so this is safe to call repeatedly */
 kfree(adapter->rss_key);
 kfree(adapter->rss_lut);
 adapter->rss_key = NULL;
 adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 * @running: true if adapter->state == __IAVF_RUNNING
 *
 * Frees the existing IRQs and interrupt scheme, then rebuilds them and
 * remaps rings to vectors. The VSI is marked down and Tx queues stopped
 * during the transition.
 *
 * Returns 0 on success, negative on failure
 **/

static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
{
 struct net_device *netdev = adapter->netdev;
 int err;

 /* traffic IRQs are only requested while the interface is running */
 if (running)
  iavf_free_traffic_irqs(adapter);
 iavf_free_misc_irq(adapter);
 iavf_free_interrupt_scheme(adapter);

 err = iavf_init_interrupt_scheme(adapter);
 if (err)
  goto err;

 netif_tx_stop_all_queues(netdev);

 err = iavf_request_misc_irq(adapter);
 if (err)
  goto err;

 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

 iavf_map_rings_to_vectors(adapter);
err:
 return err;
}

/**
 * iavf_finish_config - do all netdev work that needs RTNL
 * @work: our work_struct
 *
 * Updates netdev features, sets the real queue counts and, if necessary,
 * registers the netdev - all of which require RTNL and the netdev lock.
 */

static void iavf_finish_config(struct work_struct *work)
{
 struct iavf_adapter *adapter;
 bool netdev_released = false;
 int pairs, err;

 adapter = container_of(work, struct iavf_adapter, finish_config);

 /* Always take RTNL first to prevent circular lock dependency;
  * the dev->lock (== netdev lock) is needed to update the queue number.
  */
 rtnl_lock();
 netdev_lock(adapter->netdev);

 /* feature update only makes sense on a registered, live netdev */
 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
     adapter->netdev->reg_state == NETREG_REGISTERED &&
     !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
  netdev_update_features(adapter->netdev);
  adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
 }

 switch (adapter->state) {
 case __IAVF_DOWN:
  /* Set the real number of queues when reset occurs while
   * state == __IAVF_DOWN
   */
  pairs = adapter->num_active_queues;
  netif_set_real_num_rx_queues(adapter->netdev, pairs);
  netif_set_real_num_tx_queues(adapter->netdev, pairs);

  if (adapter->netdev->reg_state != NETREG_REGISTERED) {
   /* release the netdev lock before registering and track
    * that via netdev_released so the exit path does not
    * unlock twice
    */
   netdev_unlock(adapter->netdev);
   netdev_released = true;
   err = register_netdevice(adapter->netdev);
   if (err) {
    dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
     err);

    /* go back and try again.*/
    netdev_lock(adapter->netdev);
    iavf_free_rss(adapter);
    iavf_free_misc_irq(adapter);
    iavf_reset_interrupt_capability(adapter);
    iavf_change_state(adapter,
        __IAVF_INIT_CONFIG_ADAPTER);
    netdev_unlock(adapter->netdev);
    goto out;
   }
  }
  break;
 case __IAVF_RUNNING:
  pairs = adapter->num_active_queues;
  netif_set_real_num_rx_queues(adapter->netdev, pairs);
  netif_set_real_num_tx_queues(adapter->netdev, pairs);
  break;

 default:
  break;
 }

out:
 if (!netdev_released)
  netdev_unlock(adapter->netdev);
 rtnl_unlock();
}

/**
 * iavf_schedule_finish_config - Set the flags and schedule a reset event
 * @adapter: board private structure
 *
 * Queues the finish_config work unless the adapter is being removed.
 **/

void iavf_schedule_finish_config(struct iavf_adapter *adapter)
{
 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
  return;

 queue_work(adapter->wq, &adapter->finish_config);
}

/**
 * iavf_process_aq_command - process aq_required flags
 * and sends aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/

static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
  return iavf_send_vf_config_msg(adapter);
 if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
  return iavf_send_vf_offload_vlan_v2_msg(adapter);
 if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
  return iavf_send_vf_supported_rxdids_msg(adapter);
 if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
  return iavf_send_vf_ptp_caps_msg(adapter);
 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
  iavf_disable_queues(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
  iavf_map_queues(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
  iavf_add_ether_addrs(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
  iavf_add_vlans(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
  iavf_del_ether_addrs(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
  iavf_del_vlans(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
  iavf_enable_vlan_stripping(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
  iavf_disable_vlan_stripping(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
  iavf_cfg_queues_bw(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) {
  iavf_get_qos_caps(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) {
  iavf_cfg_queues_quanta_size(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
  iavf_configure_queues(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
  iavf_enable_queues(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
  /* This message goes straight to the firmware, not the
 * PF, so we don't have to set current_op as we will
 * not get a response through the ARQ.
 */

  adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
  iavf_get_rss_hashcfg(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
  iavf_set_rss_hashcfg(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
  iavf_set_rss_key(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
  iavf_set_rss_lut(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
  iavf_set_rss_hfunc(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
  iavf_set_promiscuous(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
  iavf_enable_channels(adapter);
  return 0;
 }

 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
  iavf_disable_channels(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
  iavf_add_cloud_filter(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
  iavf_del_cloud_filter(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
  iavf_add_fdir_filter(adapter);
  return IAVF_SUCCESS;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
  iavf_del_fdir_filter(adapter);
  return IAVF_SUCCESS;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
  iavf_add_adv_rss_cfg(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
  iavf_del_adv_rss_cfg(adapter);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
  iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
  iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
  iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
  iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
  iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
  iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
  iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
  iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
  return 0;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
  iavf_virtchnl_send_ptp_cmd(adapter);
  return IAVF_SUCCESS;
 }
 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
  iavf_request_stats(adapter);
  return 0;
 }

 return -EAGAIN;
}

/**
 * iavf_set_vlan_offload_features - set VLAN offload configuration
 * @adapter: board private structure
 * @prev_features: previous features used for comparison
 * @features: updated features used for configuration
 *
 * Set the aq_required bit(s) based on the requested features passed in to
 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
 * the watchdog if any changes are requested to expedite the request via
 * virtchnl.
 **/

static void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
          netdev_features_t prev_features,
          netdev_features_t features)
{
 bool enable_stripping = true, enable_insertion = true;
 u16 vlan_ethertype = 0;
 u64 aq_required = 0;

 /* keep cases separate because one ethertype for offloads can be
  * disabled at the same time as another is disabled, so check for an
  * enabled ethertype first, then check for disabled. Default to
  * ETH_P_8021Q so an ethertype is specified if disabling insertion and
  * stripping.
  */
 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
  vlan_ethertype = ETH_P_8021AD;
 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
  vlan_ethertype = ETH_P_8021Q;
 else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
  vlan_ethertype = ETH_P_8021AD;
 else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
  vlan_ethertype = ETH_P_8021Q;
 else
  vlan_ethertype = ETH_P_8021Q;

 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
  enable_stripping = false;
 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
  enable_insertion = false;

 if (VLAN_ALLOWED(adapter)) {
  /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
   * stripping via virtchnl. VLAN insertion can be toggled on the
   * netdev, but it doesn't require a virtchnl message
   */
  if (enable_stripping)
   aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
  else
   aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;

 } else if (VLAN_V2_ALLOWED(adapter)) {
  /* VLAN v2 negotiates stripping and insertion per ethertype */
  switch (vlan_ethertype) {
  case ETH_P_8021Q:
   if (enable_stripping)
    aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
   else
    aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;

   if (enable_insertion)
    aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
   else
    aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
   break;
  case ETH_P_8021AD:
   if (enable_stripping)
    aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
   else
    aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;

   if (enable_insertion)
    aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
   else
    aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
   break;
  }
 }

 /* only kick the aq task if something actually needs to change */
 if (aq_required)
  iavf_schedule_aq_request(adapter, aq_required);
}

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function processes the __IAVF_STARTUP driver state.
 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
 * on failure the state is changed to __IAVF_INIT_FAILED.
 **/

static void iavf_startup(struct iavf_adapter *adapter)
{
 struct pci_dev *pdev = adapter->pdev;
 struct iavf_hw *hw = &adapter->hw;
 enum iavf_status status;
 int ret;

 WARN_ON(adapter->state != __IAVF_STARTUP);

 /* driver loaded, probe complete */
 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

 ret = iavf_check_reset_complete(hw);
 if (ret) {
  dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
    ret);
  goto err;
 }
 hw->aq.num_arq_entries = IAVF_AQ_LEN;
 hw->aq.num_asq_entries = IAVF_AQ_LEN;
 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

 status = iavf_init_adminq(hw);
 if (status) {
  dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
   status);
  goto err;
 }
 ret = iavf_send_api_ver(adapter);
 if (ret) {
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=95 H=94 G=94

¤ Dauer der Verarbeitung: 0.19 Sekunden  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.