/**
 * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
 * @interface: fm10k private interface structure
 *
 * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
 * started immediately, request that it be restarted when possible.
 */
void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
{
	/* Avoid processing the MAC/VLAN queue when the service task is
	 * disabled, or when we're resetting the device.
	 *
	 * test_and_set_bit() on __FM10K_MACVLAN_SCHED also guarantees only
	 * one caller actually queues the work; all others fall through to
	 * the else branch and record a pending request instead.
	 */
	if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
	    !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
		/* We won the race to schedule, so any previously recorded
		 * request is now being serviced; drop the request flag.
		 */
		clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);

		/* We delay the actual start of execution in order to allow
		 * multiple MAC/VLAN updates to accumulate before handling
		 * them, and to allow some time to let the mailbox drain
		 * between runs. (Delay argument is in jiffies.)
		 */
		queue_delayed_work(fm10k_workqueue,
				   &interface->macvlan_task, 10);
	} else {
		/* Could not schedule now; ask for a restart when possible */
		set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
	}
}
/**
 * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Wait until the MAC/VLAN queue task has stopped, and cancel any future
 * requests.
 */
static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
{
	/* Disable the MAC/VLAN work item; fm10k_macvlan_schedule() checks
	 * this bit before queueing, so no new work can start after this.
	 */
	set_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* Make sure we waited until any current invocations have stopped */
	cancel_delayed_work_sync(&interface->macvlan_task);

	/* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
	 * However, it may not be unset if the MAC/VLAN task never actually
	 * got a chance to run. Since we've canceled the task here, and it
	 * cannot be rescheduled right now, we need to ensure the scheduled
	 * bit gets unset.
	 */
	clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
}
/**
 * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
 * @interface: fm10k private interface structure
 *
 * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
 * the MAC/VLAN work monitor.
 */
static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
{
	/* Re-enable the MAC/VLAN work item */
	clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);

	/* We might have received a MAC/VLAN request while disabled. If so,
	 * kick off the queue now.
	 */
	if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
		fm10k_macvlan_schedule(interface);
}
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
clear_bit(__FM10K_SERVICE_SCHED, interface->state);
/* If a service event was requested since we started, immediately * re-schedule now. This ensures we don't drop a request until the * next timer event.
*/ if (test_bit(__FM10K_SERVICE_REQUEST, interface->state))
fm10k_service_event_schedule(interface);
}
/* It's possible that cancel_work_sync stopped the service task from * running before it could actually start. In this case the * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that * the service task cannot be running at this point, we need to clear * the scheduled bit, as otherwise the service task may never be * restarted.
*/
clear_bit(__FM10K_SERVICE_SCHED, interface->state);
}
/** * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset * @interface: fm10k private data structure * * This function prepares for a device reset by shutting as much down as we * can. It does nothing and returns false if __FM10K_RESETTING was already set * prior to calling this function. It returns true if it actually did work.
*/ staticbool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{ struct net_device *netdev = interface->netdev;
/* put off any impending NetWatchDogTimeout */
netif_trans_update(netdev);
/* Nothing to do if a reset is already in progress */ if (test_and_set_bit(__FM10K_RESETTING, interface->state)) returnfalse;
/* As the MAC/VLAN task will be accessing registers it must not be * running while we reset. Although the task will not be scheduled * once we start resetting it may already be running
*/
fm10k_stop_macvlan_task(interface);
/* reset and initialize the hardware so it is in a known state */
err = hw->mac.ops.reset_hw(hw); if (err) {
dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err); goto reinit_err;
}
/* do nothing if netdev is still present or hw_addr is set */ if (netif_device_present(netdev) || interface->hw.hw_addr) return;
/* We've lost the PCIe register space, and can no longer access the * device. Shut everything except the detach subtask down and prepare * to reset the device in case we recover. If we actually prepare for * reset, indicate that we're detached.
*/ if (fm10k_prepare_for_reset(interface))
set_bit(__FM10K_RESET_DETACHED, interface->state);
/* check the real address space to see if we've recovered */
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr); if (~value) { int err;
/* Make sure the reset was initiated because we detached, * otherwise we might race with a different reset flow.
*/ if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
interface->state)) return;
/* Restore the hardware address */
interface->hw.hw_addr = interface->uc_addr;
/* PCIe link has been restored, and the device is active * again. Restore everything and reset the device.
*/
err = fm10k_handle_reset(interface); if (err) {
netdev_err(netdev, "Unable to reset device: %d\n", err);
interface->hw.hw_addr = NULL; return;
}
/* Re-attach the netdev */
netif_device_attach(netdev);
netdev_warn(netdev, "PCIe link restored, device now attached\n"); return;
}
}
staticvoid fm10k_reset_subtask(struct fm10k_intfc *interface)
{ int err;
if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
interface->flags)) return;
/* If another thread has already prepared to reset the device, we * should not attempt to handle a reset here, since we'd race with * that thread. This may happen if we suspend the device or if the * PCIe link is lost. In this case, we'll just ignore the RESET * request, as it will (eventually) be taken care of when the thread * which actually started the reset is finished.
*/ if (!fm10k_prepare_for_reset(interface)) return;
/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct net_device *netdev = interface->netdev;
	int prio;

	/* the pending update is being handled now; drop the request flag */
	clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	/* the SWPRI map registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* program one SWPRI to PC map entry per switch priority, taken
	 * from the netdev priority to traffic-class mapping
	 */
	for (prio = 0; prio < FM10K_SWPRI_MAX; prio++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(prio),
				netdev_get_prio_tc_map(netdev, prio));
}
/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	/* While a link-down event is pending, force host_ready false and
	 * bail out until the event deadline has passed; only then clear
	 * the link-down flag and fall through to re-query the host.
	 */
	if (test_bit(__FM10K_LINK_DOWN, interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, interface->state);
	}

	/* Apply a pending SWPRI map update. This needs the RTNL lock, so
	 * only attempt it opportunistically; on trylock failure the flag
	 * remains set and we retry on a later watchdog pass.
	 */
	if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Query host readiness; if the query fails and we are already past
	 * the last reset time, request a reset.
	 */
	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);

	/* free the lock */
	fm10k_mbx_unlock(interface);
}
/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* If we're resetting, bail out */
	if (test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}
/** * fm10k_watchdog_host_is_ready - Update netdev status based on host ready * @interface: board private structure
**/ staticvoid fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{ struct net_device *netdev = interface->netdev;
/* only continue if link state is currently down */ if (netif_carrier_ok(netdev)) return;
netif_info(interface, drv, netdev, "NIC Link is up\n");
/* ensure only one thread updates stats at a time */ if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state)) return;
/* do not allow stats update via service task for next second */
interface->next_stats_update = jiffies + HZ;
/* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) { struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);
/* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);
/** * fm10k_watchdog_flush_tx - flush queues on host not ready * @interface: pointer to the device interface structure
**/ staticvoid fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{ int some_tx_pending = 0; int i;
/* nothing to do if carrier is up */ if (netif_carrier_ok(interface->netdev)) return;
for (i = 0; i < interface->num_tx_queues; i++) { struct fm10k_ring *tx_ring = interface->tx_ring[i];
/* We've lost link, so the controller stops DMA, but we've got * queued Tx work that's never going to get done, so reset * controller to flush Tx.
*/ if (some_tx_pending)
set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
}
/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* skip the watchdog entirely while the interface is down or while
	 * a reset is in progress
	 */
	if (test_bit(__FM10K_DOWN, interface->state) ||
	    test_bit(__FM10K_RESETTING, interface->state))
		return;

	/* handle link state according to current host readiness */
	if (!interface->host_ready)
		fm10k_watchdog_host_not_ready(interface);
	else
		fm10k_watchdog_host_is_ready(interface);

	/* refresh statistics at most once per stats interval */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}
/** * fm10k_check_hang_subtask - check for hung queues and dropped interrupts * @interface: pointer to the device interface structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred.
*/ staticvoid fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{ /* If we're down or resetting, just bail */ if (test_bit(__FM10K_DOWN, interface->state) ||
test_bit(__FM10K_RESETTING, interface->state)) return;
/* rate limit tx hang checks to only once every 2 seconds */ if (time_is_after_eq_jiffies(interface->next_tx_hang_check)) return;
interface->next_tx_hang_check = jiffies + (2 * HZ);
if (netif_carrier_ok(interface->netdev)) { int i;
/* Force detection of hung controller */ for (i = 0; i < interface->num_tx_queues; i++)
set_check_for_tx_hang(interface->tx_ring[i]);
/* Rearm all in-use q_vectors for immediate firing */ for (i = 0; i < interface->num_q_vectors; i++) { struct fm10k_q_vector *qv = interface->q_vector[i];
/* Check whether we're detached first */
fm10k_detach_subtask(interface);
/* tasks run even when interface is down */
fm10k_mbx_subtask(interface);
fm10k_reset_subtask(interface);
/* tasks only run when interface is up */
fm10k_watchdog_subtask(interface);
fm10k_check_hang_subtask(interface);
/* release lock on service events to allow scheduling next event */
fm10k_service_event_complete(interface);
}
/** * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager * @work: pointer to work_struct containing our data * * This work item handles sending MAC/VLAN updates to the switch manager. When * the interface is up, it will attempt to queue mailbox messages to the * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the * mailbox is full, it will reschedule itself to try again in a short while. * This ensures that the driver does not overload the switch mailbox with too * many simultaneous requests, causing an unnecessary reset.
**/ staticvoid fm10k_macvlan_task(struct work_struct *work)
{ struct fm10k_macvlan_request *item; struct fm10k_intfc *interface; struct delayed_work *dwork; struct list_head *requests; struct fm10k_hw *hw; unsignedlong flags;
do { /* Pop the first item off the list */
spin_lock_irqsave(&interface->macvlan_lock, flags);
item = list_first_entry_or_null(requests, struct fm10k_macvlan_request,
list); if (item)
list_del_init(&item->list);
/* We have no more items to process */ if (!item) goto done;
fm10k_mbx_lock(interface);
/* Check that we have plenty of space to send the message. We * want to ensure that the mailbox stays low enough to avoid a * change in the host state, otherwise we may see spurious * link up / link down notifications.
*/ if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
hw->mbx.ops.process(hw, &hw->mbx);
set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
fm10k_mbx_unlock(interface);
/* Put the request back on the list */
spin_lock_irqsave(&interface->macvlan_lock, flags);
list_add(&item->list, requests);
spin_unlock_irqrestore(&interface->macvlan_lock, flags); break;
}
switch (item->type) { case FM10K_MC_MAC_REQUEST:
hw->mac.ops.update_mc_addr(hw,
item->mac.glort,
item->mac.addr,
item->mac.vid,
item->set); break; case FM10K_UC_MAC_REQUEST:
hw->mac.ops.update_uc_addr(hw,
item->mac.glort,
item->mac.addr,
item->mac.vid,
item->set,
0); break; case FM10K_VLAN_REQUEST:
hw->mac.ops.update_vlan(hw,
item->vlan.vid,
item->vlan.vsi,
item->set); break; default: break;
}
fm10k_mbx_unlock(interface);
/* Free the item now that we've sent the update */
kfree(item);
} while (true);
/* flush memory to make sure state is correct */
smp_mb__before_atomic();
clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
/* If a MAC/VLAN request was scheduled since we started, we should * re-schedule. However, there is no reason to re-schedule if there is * no work to do.
*/ if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
fm10k_macvlan_schedule(interface);
}
/** * fm10k_configure_tx_ring - Configure Tx ring after Reset * @interface: board private structure * @ring: structure containing ring specific data * * Configure the Tx descriptor ring after a reset.
**/ staticvoid fm10k_configure_tx_ring(struct fm10k_intfc *interface, struct fm10k_ring *ring)
{ struct fm10k_hw *hw = &interface->hw;
u64 tdba = ring->dma;
u32 size = ring->count * sizeof(struct fm10k_tx_desc);
u32 txint = FM10K_INT_MAP_DISABLE;
u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
fm10k_write_flush(hw);
/* possible poll here to verify ring resources have been cleaned */
/* set location and size for descriptor ring */
fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);
/* reset head and tail pointers */
fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);
/* store tail pointer */
ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 reg_idx = ring->reg_idx;
	u32 txdctl;
	int attempts;

	/* nothing to do if the queue is already enabled */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll (up to 10 sleeps of 1-2ms each) until the hardware reports
	 * the queue enabled
	 */
	for (attempts = 10; attempts; attempts--) {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
		if (txdctl & FM10K_TXDCTL_ENABLE)
			break;
	}

	/* attempts only reaches zero if every poll saw the queue disabled */
	if (!attempts)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}
/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int q;

	/* first pass: program descriptor ring registers for every queue */
	for (q = 0; q < interface->num_tx_queues; q++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[q]);

	/* second pass: verify the hardware reports each ring enabled */
	for (q = 0; q < interface->num_tx_queues; q++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[q]);
}
/** * fm10k_configure_rx_ring - Configure Rx ring after Reset * @interface: board private structure * @ring: structure containing ring specific data * * Configure the Rx descriptor ring after a reset.
**/ staticvoid fm10k_configure_rx_ring(struct fm10k_intfc *interface, struct fm10k_ring *ring)
{
u64 rdba = ring->dma; struct fm10k_hw *hw = &interface->hw;
u32 size = ring->count * sizeof(union fm10k_rx_desc);
u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
u32 rxint = FM10K_INT_MAP_DISABLE;
u8 rx_pause = interface->rx_pause;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx));
rxqctl &= ~FM10K_RXQCTL_ENABLE;
fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);
fm10k_write_flush(hw);
/* possible poll here to verify ring resources have been cleaned */
/* set location and size for descriptor ring */
fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);
/* reset head and tail pointers */
fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);
/* store tail pointer */
ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
ring->next_to_alloc = 0;
/* Configure the Rx buffer size for one buff without split */
srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;
/* Configure the Rx ring to suppress loopback packets */
srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);
/* Enable drop on empty */ #ifdef CONFIG_DCB if (interface->pfc_en)
rx_pause = interface->pfc_en; #endif if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
/* assign glort value for RSS/DCB specific to this interface */
memset(&dglort, 0, sizeof(dglort));
dglort.inner_rss = 1;
dglort.glort = interface->glort;
dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); /* configure DGLORT mapping for RSS/DCB */
dglort.idx = fm10k_dglort_pf_rss; if (interface->l2_accel)
dglort.shared_l = fls(interface->l2_accel->size);
hw->mac.ops.configure_dglort_map(hw, &dglort);
}
/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers, one ring at
	 * a time
	 */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}
staticvoid fm10k_napi_enable_all(struct fm10k_intfc *interface)
{ struct fm10k_q_vector *q_vector; int q_idx;
/* For VF faults, clear out the respective LPORT, reset the queue * resources, and then reconnect to the mailbox. This allows the * VF in question to resume behavior. For transient faults that are * the result of non-malicious behavior this will log the fault and * allow the VF to resume functionality. Obviously for malicious VFs * they will be able to attempt malicious behavior again. In this * case, the system administrator will need to step in and manually * remove or disable the VF in question.
*/ if (fault->func && iov_data) { int vf = fault->func - 1; struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];
for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
eicr;
eicr >>= 1, type += FM10K_FAULT_SIZE) { /* only check if there is an error reported */ if (!(eicr & 0x1)) continue;
/* retrieve fault info */
err = hw->mac.ops.get_fault(hw, type, &fault); if (err) {
dev_err(&interface->pdev->dev, "error reading fault\n"); continue;
}
/* unmask any set bits related to this interrupt */
eicr = fm10k_read_reg(hw, FM10K_EICR);
fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
FM10K_EICR_SWITCHREADY |
FM10K_EICR_SWITCHNOTREADY));
/* report any faults found to the message log */
fm10k_report_fault(interface, eicr);
/* reset any queues disabled due to receiver overrun */
fm10k_reset_drop_on_empty(interface, eicr);
/* service mailboxes */ if (fm10k_mbx_trylock(interface)) {
s32 err = mbx->ops.process(hw, mbx);
if (err == FM10K_ERR_RESET_REQUESTED)
set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* if switch toggled state we should reset GLORTs */ if (eicr & FM10K_EICR_SWITCHNOTREADY) { /* force link down for at least 4 seconds */
interface->link_down_event = jiffies + (4 * HZ);
set_bit(__FM10K_LINK_DOWN, interface->state);
/* reset dglort_map back to no config */
hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
}
/* we should validate host state after interrupt event */
hw->mac.get_host_state = true;
/* validate host state, and handle VF mailboxes in the service task */
fm10k_service_event_schedule(interface);
/* MAC was changed so we need reset */ if (is_valid_ether_addr(hw->mac.perm_addr) &&
!ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/* VLAN override was changed, or default VLAN changed */ if ((vlan_override != hw->mac.vlan_override) ||
(default_vid != hw->mac.default_vid))
set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
err = fm10k_msg_err_pf(hw, results, mbx); if (!err && hw->swapi.status) { /* force link down for a reasonable delay */
interface->link_down_event = jiffies + (2 * HZ);
set_bit(__FM10K_LINK_DOWN, interface->state);
/* reset dglort_map back to no config */
hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
dev_warn(&interface->pdev->dev, "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
dev_warn(&interface->pdev->dev, "request logical port map failed: %d\n",
hw->swapi.status);
return 0;
}
err = fm10k_msg_lport_map_pf(hw, results, mbx); if (err) return err;
interface->lport_map_failed = false;
/* we need to reset if port count was just updated */ if (dglort_map != hw->mac.dglort_map)
set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
/** * fm10k_qv_request_irq - initialize interrupts for queue vectors * @interface: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel.
**/ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{ struct net_device *dev = interface->netdev; struct fm10k_hw *hw = &interface->hw; struct msix_entry *entry; unsignedint ri = 0, ti = 0; int vector, err;
/* capture stats one last time before stopping interface */
fm10k_update_stats(interface);
/* prevent updating statistics while we're down */ while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
usleep_range(1000, 2000);
/* skip waiting for TX DMA if we lost PCIe link */ if (FM10K_REMOVED(hw->hw_addr)) goto skip_tx_dma_drain;
/* In some rare circumstances it can take a while for Tx queues to * quiesce and be fully disabled. Attempt to .stop_hw() first, and * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop * until the Tx queues have emptied, or until a number of retries. If * we fail to clear within the retry loop, we will issue a warning * indicating that Tx DMA is probably hung. Note this means we call * .stop_hw() twice but this shouldn't cause any problems.
*/
err = hw->mac.ops.stop_hw(hw); if (err != FM10K_ERR_REQUESTS_PENDING) goto skip_tx_dma_drain;
/* start checking at the last ring to have pending Tx */ for (; i < interface->num_tx_queues; i++) if (fm10k_get_tx_pending(interface->tx_ring[i], false)) break;
/* if all the queues are drained, we can break now */ if (i == interface->num_tx_queues) break;
}
if (count >= TX_DMA_DRAIN_RETRIES)
dev_err(&interface->pdev->dev, "Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
count);
skip_tx_dma_drain: /* Disable DMA engine for Tx/Rx */
err = hw->mac.ops.stop_hw(hw); if (err == FM10K_ERR_REQUESTS_PENDING)
dev_err(&interface->pdev->dev, "due to pending requests hw was not shut down gracefully\n"); elseif (err)
dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
fm10k_clean_all_rx_rings(interface);
}
/** * fm10k_sw_init - Initialize general software structures * @interface: host interface private structure to initialize * @ent: PCI device ID entry * * fm10k_sw_init initializes the interface private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size).
**/ staticint fm10k_sw_init(struct fm10k_intfc *interface, conststruct pci_device_id *ent)
{ conststruct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; struct fm10k_hw *hw = &interface->hw; struct pci_dev *pdev = interface->pdev; struct net_device *netdev = interface->netdev;
u32 rss_key[FM10K_RSSRK_SIZE]; unsignedint rss; int err;
/* Setup IOV handlers */ if (fi->iov_ops)
memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));
/* Set common capability flags and settings */
rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
interface->ring_feature[RING_F_RSS].limit = rss;
fi->get_invariants(hw);
/* pick up the PCIe bus settings for reporting later */ if (hw->mac.ops.get_bus_info)
hw->mac.ops.get_bus_info(hw);
/* limit the usable DMA range */ if (hw->mac.ops.set_dma_mask)
hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));
/* update netdev with DMA restrictions */ if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
/* reset and initialize the hardware so it is in a known state */
err = hw->mac.ops.reset_hw(hw); if (err) {
dev_err(&pdev->dev, "reset_hw failed: %d\n", err); return err;
}
/* Set upper limit on IOV VFs that can be allocated */
pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);
/* Start with random Ethernet address */
eth_random_addr(hw->mac.addr);
/* Initialize MAC address from hardware */
err = hw->mac.ops.read_mac_addr(hw); if (err) {
dev_warn(&pdev->dev, "Failed to obtain MAC address defaulting to random\n"); /* tag address assignment as random */
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
err = fm10k_sw_init(interface, ent); if (err) goto err_sw_init;
/* enable debugfs support */
fm10k_dbg_intfc_init(interface);
err = fm10k_init_queueing_scheme(interface); if (err) goto err_sw_init;
/* the mbx interrupt might attempt to schedule the service task, so we * must ensure it is disabled since we haven't yet requested the timer * or work item.
*/
set_bit(__FM10K_SERVICE_DISABLE, interface->state);
err = fm10k_mbx_request_irq(interface); if (err) goto err_mbx_interrupt;
/* final check of hardware state before registering the interface */
err = fm10k_hw_ready(interface); if (err) goto err_register;
err = register_netdev(netdev); if (err) goto err_register;
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
/* stop all the transmit queues from transmitting until link is up */
netif_tx_stop_all_queues(netdev);
/* Initialize service timer and service task late in order to avoid * cleanup issues.
*/
timer_setup(&interface->service_timer, fm10k_service_timer, 0);
INIT_WORK(&interface->service_task, fm10k_service_task);
/* Setup the MAC/VLAN queue */
INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);
/* kick off service timer now, even when interface is down */
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
/* print warning for non-optimal configurations */
pcie_print_link_status(interface->pdev);
/* report MAC address for logging */
dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
fm10k_iov_configure(pdev, 0);
/* clear the service task disable bit and kick off service task */
clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
fm10k_service_event_schedule(interface);
/** * fm10k_remove - Device Removal Routine * @pdev: PCI device information struct * * fm10k_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory.
**/ staticvoid fm10k_remove(struct pci_dev *pdev)
{ struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev;
/* remove any debugfs interfaces */
fm10k_dbg_intfc_exit(interface);
if (interface->sw_addr)
iounmap(interface->sw_addr);
iounmap(interface->uc_addr);
free_netdev(netdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
staticvoid fm10k_prepare_suspend(struct fm10k_intfc *interface)
{ /* the watchdog task reads from registers, which might appear like * a surprise remove if the PCIe device is disabled while we're * stopped. We stop the watchdog task until after we resume software * activity. * * Note that the MAC/VLAN task will be stopped as part of preparing * for reset so we don't need to handle it here.
*/
fm10k_stop_service_event(interface);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.12 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.