switch (reginfo->ofs) { case E1000_RDLEN(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_RDLEN(n)); break; case E1000_RDH(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_RDH(n)); break; case E1000_RDT(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_RDT(n)); break; case E1000_RXDCTL(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_RXDCTL(n)); break; case E1000_RDBAL(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_RDBAL(n)); break; case E1000_RDBAH(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_RDBAH(n)); break; case E1000_TDBAL(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_TDBAL(n)); break; case E1000_TDBAH(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_TDBAH(n)); break; case E1000_TDLEN(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_TDLEN(n)); break; case E1000_TDH(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_TDH(n)); break; case E1000_TDT(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_TDT(n)); break; case E1000_TXDCTL(0): for (n = 0; n < 4; n++)
regs[n] = rd32(E1000_TXDCTL(n)); break; default:
pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); return;
}
/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: opaque pointer to adapter struct
 *
 *  Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	/* normalize the DATA_IN bit to 0/1 */
	return !!(i2cctl & E1000_I2C_DATA_IN);
}
/** * igb_set_i2c_data - Sets the I2C data bit * @data: pointer to hardware structure * @state: I2C data value (0 or 1) to set * * Sets the I2C data bit
**/ staticvoid igb_set_i2c_data(void *data, int state)
{ struct igb_adapter *adapter = (struct igb_adapter *)data; struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;

	return adapter->netdev;
}
staticstruct pci_driver igb_driver;
/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_IGB_DCA
	/* roll back the DCA notifier registration if PCI registration failed */
	if (ret)
		dca_unregister_notify(&dca_notifier);
#endif
	return ret;
}
module_init(igb_init_module);
/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}
module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) /** * igb_cache_ring_register - Descriptor ring to register mapping * @adapter: board private structure to initialize * * Once we know the feature-set enabled for the device, we'll cache * the register offset the descriptor ring is assigned to.
**/ staticvoid igb_cache_ring_register(struct igb_adapter *adapter)
{ int i = 0, j = 0;
u32 rbase_offset = adapter->vfs_allocated_count;
switch (adapter->hw.mac.type) { case e1000_82576: /* The queues are allocated for virtualization such that VF 0 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. * In order to avoid collision we start at the first free queue * and continue consuming queues in the same sequence
*/ if (adapter->vfs_allocated_count) { for (; i < adapter->rss_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
fallthrough; case e1000_82575: case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: default: for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i; for (; j < adapter->num_tx_queues; j++)
adapter->tx_ring[j]->reg_idx = rbase_offset + j; break;
}
}
/* reads should not return all F's */ if (!(~value) && (!reg || !(~readl(hw_addr)))) { struct net_device *netdev = igb->netdev;
hw->hw_addr = NULL;
netdev_err(netdev, "PCIe link lost\n");
WARN(pci_device_is_present(igb->pdev), "igb: Failed to read reg 0x%x!\n", reg);
}
return value;
}
/** * igb_write_ivar - configure ivar for given MSI-X vector * @hw: pointer to the HW structure * @msix_vector: vector number we are allocating to a given ring * @index: row index of IVAR register to write within IVAR table * @offset: column offset of in IVAR, should be multiple of 8 * * This function is intended to handle the writing of the IVAR register * for adapters 82576 and newer. The IVAR table consists of 2 columns, * each containing an cause allocation for an Rx and Tx ring, and a * variable number of rows depending on the number of queues supported.
**/ staticvoid igb_write_ivar(struct e1000_hw *hw, int msix_vector, int index, int offset)
{
u32 ivar = array_rd32(E1000_IVAR0, index);
/* clear any bits that are currently set */
ivar &= ~((u32)0xFF << offset);
/* write vector and valid bit */
ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
if (q_vector->rx.ring)
rx_queue = q_vector->rx.ring->reg_idx; if (q_vector->tx.ring)
tx_queue = q_vector->tx.ring->reg_idx;
switch (hw->mac.type) { case e1000_82575: /* The 82575 assigns vectors using a bitmask, which matches the * bitmask for the EICR/EIMS/EIMC registers. To assign one * or more queues to a vector, we write the appropriate bits * into the MSIXBM register for that vector.
*/ if (rx_queue > IGB_N0_QUEUE)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm; break; case e1000_82576: /* 82576 uses a table that essentially consists of 2 columns * with 8 rows. The ordering is column-major so we use the * lower 3 bits as the row index, and the 4th bit as the * column offset.
*/ if (rx_queue > IGB_N0_QUEUE)
igb_write_ivar(hw, msix_vector,
rx_queue & 0x7,
(rx_queue & 0x8) << 1); if (tx_queue > IGB_N0_QUEUE)
igb_write_ivar(hw, msix_vector,
tx_queue & 0x7,
((tx_queue & 0x8) << 1) + 8);
q_vector->eims_value = BIT(msix_vector); break; case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: /* On 82580 and newer adapters the scheme is similar to 82576 * however instead of ordering column-major we have things * ordered row-major. So we traverse the table by using * bit 0 as the column offset, and the remaining bits as the * row index.
*/ if (rx_queue > IGB_N0_QUEUE)
igb_write_ivar(hw, msix_vector,
rx_queue >> 1,
(rx_queue & 0x1) << 4); if (tx_queue > IGB_N0_QUEUE)
igb_write_ivar(hw, msix_vector,
tx_queue >> 1,
((tx_queue & 0x1) << 4) + 8);
q_vector->eims_value = BIT(msix_vector); break; default:
BUG(); break;
}
/* add q_vector eims value to global eims_enable_mask */
adapter->eims_enable_mask |= q_vector->eims_value;
/* configure q_vector to set itr on first interrupt */
q_vector->set_itr = 1;
}
/** * igb_configure_msix - Configure MSI-X hardware * @adapter: board private structure to initialize * * igb_configure_msix sets up the hardware to properly * generate MSI-X interrupts.
**/ staticvoid igb_configure_msix(struct igb_adapter *adapter)
{
u32 tmp; int i, vector = 0; struct e1000_hw *hw = &adapter->hw;
adapter->eims_enable_mask = 0;
/* set vector for other causes, i.e. link changes */ switch (hw->mac.type) { case e1000_82575:
tmp = rd32(E1000_CTRL_EXT); /* enable MSI-X PBA support*/
tmp |= E1000_CTRL_EXT_PBA_CLR;
case e1000_82576: case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: /* Turn on MSI-X capability first, or our settings * won't stick. And it will take days to debug.
*/
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
E1000_GPIE_PBA | E1000_GPIE_EIAME |
E1000_GPIE_NSICR);
if (num_q_vectors > MAX_Q_VECTORS) {
num_q_vectors = MAX_Q_VECTORS;
dev_warn(&adapter->pdev->dev, "The number of queue vectors (%d) is higher than max allowed (%d)\n",
adapter->num_q_vectors, MAX_Q_VECTORS);
} for (i = 0; i < num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i];
vector--; for (i = 0; i < vector; i++) {
free_irq(adapter->msix_entries[free_vector++].vector,
adapter->q_vector[i]);
}
err_out: return err;
}
/**
 *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be freed
 *
 *  This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}
/**
 *  igb_reset_q_vector - Reset config for interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be reset
 *
 *  If NAPI is enabled it will delete any references to the
 *  NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. So, q_vector is NULL so we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}
/* Disable MSI-X/MSI and reset every queue vector's ring references. */
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}
/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  This function frees the memory allocated to the q_vectors. In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}
/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure to initialize
 *
 *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 *  MSI-X interrupts allocated.
 **/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	/* free vectors first; they reference the MSI-X entries torn down next */
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
/** * igb_set_interrupt_capability - set MSI or MSI-X if supported * @adapter: board private structure to initialize * @msix: boolean value of MSIX capability * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel.
**/ staticvoid igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{ int err; int numvecs, i;
if (!msix) goto msi_only;
adapter->flags |= IGB_FLAG_HAS_MSIX;
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues; if (adapter->vfs_allocated_count)
adapter->num_tx_queues = 1; else
adapter->num_tx_queues = adapter->rss_queues;
/* start with one vector for every Rx queue */
numvecs = adapter->num_rx_queues;
/* if Tx handler is separate add 1 for every Tx queue */ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
/* store the number of vectors reserved for queues */
adapter->num_q_vectors = numvecs;
/* add 1 vector for link status interrupts */
numvecs++; for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
/* If we can't do MSI-X, try MSI */
msi_only:
adapter->flags &= ~IGB_FLAG_HAS_MSIX; #ifdef CONFIG_PCI_IOV /* disable SR-IOV for non MSI-X configurations */ if (adapter->vf_data) { struct e1000_hw *hw = &adapter->hw; /* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
msleep(500);
/** * igb_alloc_q_vector - Allocate memory for a single interrupt vector * @adapter: board private structure to initialize * @v_count: q_vectors allocated on adapter, used for ring interleaving * @v_idx: index of vector in adapter struct * @txr_count: total number of Tx rings to allocate * @txr_idx: index of first Tx ring to allocate * @rxr_count: total number of Rx rings to allocate * @rxr_idx: index of first Rx ring to allocate * * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/ staticint igb_alloc_q_vector(struct igb_adapter *adapter, int v_count, int v_idx, int txr_count, int txr_idx, int rxr_count, int rxr_idx)
{ struct igb_q_vector *q_vector; struct igb_ring *ring; int ring_count;
size_t size;
/* igb only supports 1 Tx and/or 1 Rx queue per vector */ if (txr_count > 1 || rxr_count > 1) return -ENOMEM;
/* set flag indicating ring supports SCTP checksum offload */ if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
/* On i350, i354, i210, and i211, loopback VLAN packets * have the tag byte-swapped.
*/ if (adapter->hw.mac.type >= e1000_i350)
set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
ring->queue_index = rxr_idx;
u64_stats_init(&ring->rx_syncp);
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
}
return 0;
}
/** * igb_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM.
**/ staticint igb_alloc_q_vectors(struct igb_adapter *adapter)
{ int q_vectors = adapter->num_q_vectors; int rxr_remaining = adapter->num_rx_queues; int txr_remaining = adapter->num_tx_queues; int rxr_idx = 0, txr_idx = 0, v_idx = 0; int err;
while (v_idx--)
igb_free_q_vector(adapter, v_idx);
return -ENOMEM;
}
/** * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors * @adapter: board private structure to initialize * @msix: boolean value of MSIX capability * * This function initializes the interrupts and allocates all of the queues.
**/ staticint igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{ struct pci_dev *pdev = adapter->pdev; int err;
igb_set_interrupt_capability(adapter, msix);
err = igb_alloc_q_vectors(adapter); if (err) {
dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); goto err_alloc_q_vectors;
}
/** * igb_request_irq - initialize interrupts * @adapter: board private structure to initialize * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel.
**/ staticint igb_request_irq(struct igb_adapter *adapter)
{ struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err = 0;
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
err = igb_request_msix(adapter); if (!err) goto request_done; /* fall back to MSI */
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
igb_clear_interrupt_scheme(adapter);
err = igb_init_interrupt_scheme(adapter, false); if (err) goto request_done;
for (i = 0; i < adapter->num_q_vectors; i++)
free_irq(adapter->msix_entries[vector++].vector,
adapter->q_vector[i]);
} else {
free_irq(adapter->pdev->irq, adapter);
}
}
/** * igb_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure
**/ staticvoid igb_irq_disable(struct igb_adapter *adapter)
{ struct e1000_hw *hw = &adapter->hw;
/* we need to be careful when disabling interrupts. The VFs are also * mapped into these registers and so clearing the bits can cause * issues on the VF drivers so we only need to clear what we set
*/ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { /* add VID to filter table */
igb_vfta_set(hw, vid, pf_id, true, true);
adapter->mng_vlan_id = vid;
} else {
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
}
if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
(vid != old_vid) &&
!test_bit(old_vid, adapter->active_vlans)) { /* remove VID from filter table */
igb_vfta_set(hw, vid, pf_id, false, true);
}
}
/**
 *  igb_release_hw_control - release control of the h/w to f/w
 *  @adapter: address of board private structure
 *
 *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that the
 *  driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/**
 *  igb_get_hw_control - get control of the h/w from f/w
 *  @adapter: address of board private structure
 *
 *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that
 *  the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
if (mode == QUEUE_MODE_STREAM_RESERVATION)
val |= E1000_TQAVCC_QUEUEMODE; else
val &= ~E1000_TQAVCC_QUEUEMODE;
wr32(E1000_I210_TQAVCC(queue), val);
}
staticbool is_any_cbs_enabled(struct igb_adapter *adapter)
{ int i;
for (i = 0; i < adapter->num_tx_queues; i++) { if (adapter->tx_ring[i]->cbs_enable) returntrue;
}
returnfalse;
}
staticbool is_any_txtime_enabled(struct igb_adapter *adapter)
{ int i;
for (i = 0; i < adapter->num_tx_queues; i++) { if (adapter->tx_ring[i]->launchtime_enable) returntrue;
}
returnfalse;
}
/** * igb_config_tx_modes - Configure "Qav Tx mode" features on igb * @adapter: pointer to adapter struct * @queue: queue number * * Configure CBS and Launchtime for a given hardware queue. * Parameters are retrieved from the correct Tx ring, so * igb_save_cbs_params() and igb_save_txtime_params() should be used * for setting those correctly prior to this function being called.
**/ staticvoid igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{ struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
/* If any of the Qav features is enabled, configure queues as SR and * with HIGH PRIO. If none is, then configure them with LOW PRIO and * as SP.
*/ if (ring->cbs_enable || ring->launchtime_enable) {
set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
} else {
set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
}
/* If CBS is enabled, set DataTranARB and config its parameters. */ if (ring->cbs_enable || queue == 0) { /* i210 does not allow the queue 0 to be in the Strict * Priority mode while the Qav mode is enabled, so, * instead of disabling strict priority mode, we give * queue 0 the maximum of credits possible. * * See section 8.12.19 of the i210 datasheet, "Note: * Queue0 QueueMode must be set to 1b when * TransmitMode is set to Qav."
*/ if (queue == 0 && !ring->cbs_enable) { /* max "linkspeed" idleslope in kbps */
ring->idleslope = 1000000;
ring->hicredit = ETH_FRAME_LEN;
}
/* Always set data transfer arbitration to credit-based * shaper algorithm on TQAVCTRL if CBS is enabled for any of * the queues.
*/
tqavctrl = rd32(E1000_I210_TQAVCTRL);
tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
wr32(E1000_I210_TQAVCTRL, tqavctrl);
/* According to i210 datasheet section 7.2.7.7, we should set * the 'idleSlope' field from TQAVCC register following the * equation: * * For 100 Mbps link speed: * * value = BW * 0x7735 * 0.2 (E1) * * For 1000Mbps link speed: * * value = BW * 0x7735 * 2 (E2) * * E1 and E2 can be merged into one equation as shown below. * Note that 'link-speed' is in Mbps. * * value = BW * 0x7735 * 2 * link-speed * -------------- (E3) * 1000 * * 'BW' is the percentage bandwidth out of full link speed * which can be found with the following equation. Note that * idleSlope here is the parameter from this function which * is in kbps. * * BW = idleSlope * ----------------- (E4) * link-speed * 1000 * * That said, we can come up with a generic equation to * calculate the value we should set it TQAVCC register by * replacing 'BW' in E3 by E4. The resulting equation is: * * value = idleSlope * 0x7735 * 2 * link-speed * ----------------- -------------- (E5) * link-speed * 1000 1000 * * 'link-speed' is present in both sides of the fraction so * it is canceled out. The final equation is the following: * * value = idleSlope * 61034 * ----------------- (E6) * 1000000 * * NOTE: For i210, given the above, we can see that idleslope * is represented in 16.38431 kbps units by the value at * the TQAVCC register (1Gbps / 61034), which reduces * the granularity for idleslope increments. * For instance, if you want to configure a 2576kbps * idleslope, the value to be written on the register * would have to be 157.23. If rounded down, you end * up with less bandwidth available than originally * required (~2572 kbps). If rounded up, you end up * with a higher bandwidth (~2589 kbps). Below the * approach we take is to always round up the * calculated value, so the resulting bandwidth might * be slightly higher for some configurations.
*/
value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
/* Set idleSlope to zero. */
tqavcc = rd32(E1000_I210_TQAVCC(queue));
tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
wr32(E1000_I210_TQAVCC(queue), tqavcc);
/* Set hiCredit to zero. */
wr32(E1000_I210_TQAVHC(queue), 0);
/* If CBS is not enabled for any queues anymore, then return to * the default state of Data Transmission Arbitration on * TQAVCTRL.
*/ if (!is_any_cbs_enabled(adapter)) {
tqavctrl = rd32(E1000_I210_TQAVCTRL);
tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
wr32(E1000_I210_TQAVCTRL, tqavctrl);
}
}
/* If LaunchTime is enabled, set DataTranTIM. */ if (ring->launchtime_enable) { /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled * for any of the SR queues, and configure fetchtime delta. * XXX NOTE: * - LaunchTime will be enabled for all SR queues. * - A fixed offset can be added relative to the launch * time of all packets if configured at reg LAUNCH_OS0. * We are keeping it as 0 for now (default value).
*/
tqavctrl = rd32(E1000_I210_TQAVCTRL);
tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
E1000_TQAVCTRL_FETCHTIME_DELTA;
wr32(E1000_I210_TQAVCTRL, tqavctrl);
} else { /* If Launchtime is not enabled for any SR queues anymore, * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, * effectively disabling Launchtime.
*/ if (!is_any_txtime_enabled(adapter)) {
tqavctrl = rd32(E1000_I210_TQAVCTRL);
tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
wr32(E1000_I210_TQAVCTRL, tqavctrl);
}
}
/* XXX: In i210 controller the sendSlope and loCredit parameters from * CBS are not configurable by software so we don't do any 'controller * configuration' in respect to these parameters.
*/
if (queue < 0 || queue > adapter->num_tx_queues) return -EINVAL;
ring = adapter->tx_ring[queue];
ring->launchtime_enable = enable;
return 0;
}
staticint igb_save_cbs_params(struct igb_adapter *adapter, int queue, bool enable, int idleslope, int sendslope, int hicredit, int locredit)
{ struct igb_ring *ring;
if (queue < 0 || queue > adapter->num_tx_queues) return -EINVAL;
/** * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable * @adapter: pointer to adapter struct * * Configure TQAVCTRL register switching the controller's Tx mode * if FQTSS mode is enabled or disabled. Additionally, will issue * a call to igb_config_tx_modes() per queue so any previously saved * Tx parameters are applied.
**/ staticvoid igb_setup_tx_mode(struct igb_adapter *adapter)
{ struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw;
u32 val;
/* Only i210 controller supports changing the transmission mode. */ if (hw->mac.type != e1000_i210) return;
if (is_fqtss_enabled(adapter)) { int i, max_queue;
/* Configure TQAVCTRL register: set transmit mode to 'Qav', * set data fetch arbitration to 'round robin', set SP_WAIT_SR * so SP queues wait for SR ones.
*/
val = rd32(E1000_I210_TQAVCTRL);
val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
val &= ~E1000_TQAVCTRL_DATAFETCHARB;
wr32(E1000_I210_TQAVCTRL, val);
/* Configure Tx and Rx packet buffers sizes as described in * i210 datasheet section 7.2.7.7.
*/
val = rd32(E1000_TXPBS);
val &= ~I210_TXPBSIZE_MASK;
val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB |
I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB;
wr32(E1000_TXPBS, val);
val = rd32(E1000_RXPBS);
val &= ~I210_RXPBSIZE_MASK;
val |= I210_RXPBSIZE_PB_30KB;
wr32(E1000_RXPBS, val);
/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ * register should not exceed the buffer size programmed in * TXPBS. The smallest buffer size programmed in TXPBS is 4kB * so according to the datasheet we should set MAX_TPKT_SIZE to * 4kB / 64. * * However, when we do so, no frame from queue 2 and 3 are * transmitted. It seems the MAX_TPKT_SIZE should not be great * or _equal_ to the buffer size programmed in TXPBS. For this * reason, we set MAX_ TPKT_SIZE to (4kB - 1) / 64.
*/
val = (4096 - 1) / 64;
wr32(E1000_I210_DTXMXPKTSZ, val);
/* Since FQTSS mode is enabled, apply any CBS configuration * previously set. If no previous CBS configuration has been * done, then the initial configuration is applied, which means * CBS is disabled.
*/
max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
adapter->num_tx_queues : I210_SR_QUEUES_NUM;
for (i = 0; i < max_queue; i++) {
igb_config_tx_modes(adapter, i);
}
} else {
wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
val = rd32(E1000_I210_TQAVCTRL); /* According to Section 8.12.21, the other flags we've set when * enabling FQTSS are not relevant when disabling FQTSS so we * don't set they here.
*/
val &= ~E1000_TQAVCTRL_XMIT_MODE;
wr32(E1000_I210_TQAVCTRL, val);
}
/* call igb_desc_unused which always leaves * at least 1 descriptor unused to make sure * next_to_use != next_to_clean
*/ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i]; if (ring->xsk_pool)
igb_alloc_rx_buffers_zc(ring, ring->xsk_pool,
igb_desc_unused(ring)); else
igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
/**
 *  igb_power_up_link - Power up the phy/serdes link
 *  @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);

	igb_setup_link(&adapter->hw);
}
/**
 *  igb_power_down_link - Power down the phy/serdes link
 *  @adapter: address of board private structure
 **/
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}
/** * igb_check_swap_media - Detect and switch function for Media Auto Sense * @adapter: address of the board private structure
**/ staticvoid igb_check_swap_media(struct igb_adapter *adapter)
{ struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext, connsw; bool swap_now = false;
switch (hw->phy.media_type) { case e1000_media_type_copper:
netdev_info(adapter->netdev, "MAS: changing media to fiber/serdes\n");
ctrl_ext |=
E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
adapter->flags |= IGB_FLAG_MEDIA_RESET;
adapter->copper_tries = 0; break; case e1000_media_type_internal_serdes: case e1000_media_type_fiber:
netdev_info(adapter->netdev, "MAS: changing media to copper\n");
ctrl_ext &=
~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
adapter->flags |= IGB_FLAG_MEDIA_RESET; break; default: /* shouldn't get here during regular operation */
netdev_err(adapter->netdev, "AMS: Invalid media type found, returning\n"); break;
}
wr32(E1000_CTRL_EXT, ctrl_ext);
}
if (q_vector->rx.ring)
netif_queue_set_napi(adapter->netdev,
q_vector->rx.ring->queue_index,
NETDEV_QUEUE_TYPE_RX, napi);
if (q_vector->tx.ring)
netif_queue_set_napi(adapter->netdev,
q_vector->tx.ring->queue_index,
NETDEV_QUEUE_TYPE_TX, napi);
}
/** * igb_up - Open the interface and prepare it to handle traffic * @adapter: board private structure
**/ int igb_up(struct igb_adapter *adapter)
{ struct e1000_hw *hw = &adapter->hw; struct napi_struct *napi; int i;
/* hardware has been reset, we need to reload some things */
igb_configure(adapter);
clear_bit(__IGB_DOWN, &adapter->state);
for (i = 0; i < adapter->num_q_vectors; i++) {
napi = &adapter->q_vector[i]->napi;
napi_enable(napi);
igb_set_queue_napi(adapter, i, napi);
}
if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter); else
igb_assign_vector(adapter->q_vector[0], 0);
/* Clear any pending interrupts. */
rd32(E1000_TSICR);
rd32(E1000_ICR);
igb_irq_enable(adapter);
/* notify VFs that reset has been completed */ if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
/* disable transmits in the hardware */
tctl = rd32(E1000_TCTL);
tctl &= ~E1000_TCTL_EN;
wr32(E1000_TCTL, tctl); /* flush both disables and wait for them to finish */
wrfl();
usleep_range(10000, 11000);
igb_irq_disable(adapter);
adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
for (i = 0; i < adapter->num_q_vectors; i++) { if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
igb_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.