// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
/* NOTE(review): fragment — tail of the representor's ndo_start_xmit handler;
 * the function signature and local declarations were lost in extraction, so
 * only the body is visible here. Presumably skb/dev are the ndo arguments,
 * efv the representor priv and efx its parent NIC — confirm against the
 * full file.
 */
	/* __ef100_hard_start_xmit() will always return success even in the
	 * case of TX drops, where it will increment efx's tx_dropped. The
	 * efv stats really only count attempted TX, not success/failure.
	 */
	/* Count the attempted TX up front, before handing off */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	/* Transmit via the parent device; take its TX lock so we are
	 * serialised against the parent's own transmit path.
	 */
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}
/* NOTE(review): fragment of the representor configuration routine; the
 * function header (and its trailing "return 0") are missing from this chunk.
 */
	/* Cap for the software pseudo RX ring consumed by the NAPI poller */
	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, efv->idx, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits */
	WARN_ON(efv->mport >> 16);
/* NOTE(review): fragment — end of an init path that enumerates MAE mports;
 * the function header is missing from this chunk.
 */
	/* Cleared here; presumably set again when the local interface's
	 * mport is discovered during enumeration — TODO confirm.
	 */
	nic_data->have_local_intf = false;
	/* Enumeration failure is non-fatal: warn and carry on */
	rc = efx_mae_enumerate_mports(efx);
	if (rc)
		pci_warn(efx->pci_dev,
			 "Could not enumerate mports (rc=%d), are we admin?",
			 rc);
}
/* NOTE(review): fragment — body of the representor NAPI poll handler; the
 * signature and local declarations were lost in extraction. Drains up to
 * 'weight' SKBs from the software pseudo RX ring and delivers them.
 */
	INIT_LIST_HEAD(&head);
	/* Grab up to 'weight' pending SKBs */
	spin_lock_bh(&efv->rx_lock);
	/* Snapshot the producer index so we can detect packets that arrive
	 * while we are processing outside the lock.
	 */
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);
	/* Receive them */
	netif_receive_skb_list(&head);
	/* Used less than our quota: try to complete; napi_complete_done()
	 * returns true if NAPI is really done and needs re-arming.
	 */
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If write_index advanced while we were doing the
			 * RX, then storing our read_index won't re-prime the
			 * fake-interrupt. In that case, we need to schedule
			 * NAPI again to consume the additional packet(s).
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}
/* NOTE(review): fragment — body of the representor RX-packet handler; the
 * signature and locals were lost in extraction. Copies the hardware RX
 * buffer ('eh'/'rx_buf', presumably passed in — confirm) into a fresh SKB
 * and queues it on the pseudo RX ring, kicking NAPI if the ring was empty.
 */
/* Don't allow too many queued SKBs to build up, as they consume
 * GFP_ATOMIC memory. If we overrun, just start dropping.
 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	/* Copy the frame payload in, then account for it in the SKB */
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);
	skb_record_rx_queue(skb, 0);	/* rep is single-queue */
	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efv->net_dev);
	/* Add it to the rx list */
	spin_lock_bh(&efv->rx_lock);
	/* Ring was empty (consumer caught up with producer): NAPI is idle
	 * and needs a kick to notice the new packet.
	 */
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);
	/* Trigger rx work */
	if (primed)
		napi_schedule(&efv->napi);
}
/* NOTE(review): fragment — body of a representor lookup by mport ID; the
 * function header is missing from this chunk. Returns the matching entry
 * ('out', presumably initialised to NULL in the lost preamble — confirm).
 */
/* spinlock guards against list mutation while we're walking it;
 * but caller must also hold rcu_read_lock() to ensure the netdev
 * isn't freed after we drop the spinlock.
 */
	spin_lock_bh(&efx->vf_reps_lock);
	/* Linear scan of the parent's representor list for this mport */
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}