// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
*
* Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
*/
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>
/* NAPI poll budget; read-only module parameter. */
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int , 0444);
/* Default feature knobs: checksum offload, GSO, and tx NAPI. */
static bool csum = true , gso = true , napi_tx = true ;
module_param(csum, bool , 0444);
module_param(gso, bool , 0444);
/* napi_tx is writable at runtime (0644); csum/gso are read-only. */
module_param(napi_tx, bool , 0644);
/* Guest offload bits in [MAP_MIN, MAP_MAX] are remapped to feature bits
 * starting at VIRTIO_FEATURES_MAP_MIN.
 */
#define VIRTIO_OFFLOAD_MAP_MIN 46
#define VIRTIO_OFFLOAD_MAP_MAX 47
#define VIRTIO_FEATURES_MAP_MIN 65
#define VIRTIO_O2F_DELTA (VIRTIO_FEATURES_MAP_MIN - \
			  VIRTIO_OFFLOAD_MAP_MIN)

/* Return true when @obit lies inside the remapped offload-bit window. */
static bool virtio_is_mapped_offload(unsigned int obit)
{
	if (obit < VIRTIO_OFFLOAD_MAP_MIN)
		return false;

	return obit <= VIRTIO_OFFLOAD_MAP_MAX;
}
/* Translate a guest offload bit to its feature-bit number, applying the
 * fixed delta for bits inside the remapped window.
 */
static unsigned int virtio_offload_to_feature(unsigned int obit)
{
	if (virtio_is_mapped_offload(obit))
		return obit + VIRTIO_O2F_DELTA;

	return obit;
}
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
/* Frames up to this many bytes are copied into the skb head on receive. */
#define GOOD_COPY_LEN 128
/* Headroom reserved in front of received frame data. */
#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)
/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
#define VIRTNET_DRIVER_VERSION "1.0.0"
/* Guest offload bits the driver may toggle at runtime (e.g. via
 * set_features); indexed/iterated when building the ctrl-vq command.
 */
static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN,
	VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED,
	VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED,
};
/* Subset of guest offloads tied to hardware-GRO style receive coalescing. */
#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_USO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED) | \
				(1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED))
/* One statistic: its ethtool name plus the offset of the counter in the
 * driver's per-queue stats struct and, optionally, in the core
 * netdev_queue_stats_{rx,tx} struct.
 */
struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
	/* -1 when the stat is not exported through the queue-stats API. */
	size_t qstat_offset;
};
/* Accumulator filled while reclaiming completed tx buffers. */
struct virtnet_sq_free_stats {
	u64 packets;
	u64 bytes;
	/* Completions from the BQL-tracked skb path (see __free_old_xmit). */
	u64 napi_packets;
	u64 napi_bytes;
	/* Number of XSK tx completions seen. */
	u64 xsk;
};
/* Per-send-queue counters, guarded by @syncp. */
struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_tx_drops;
	u64_stats_t kicks;
	u64_stats_t tx_timeouts;
	u64_stats_t stop;
	u64_stats_t wake;
};
/* Per-receive-queue counters, guarded by @syncp. */
struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t drops;
	u64_stats_t xdp_packets;
	u64_stats_t xdp_tx;
	u64_stats_t xdp_redirects;
	u64_stats_t xdp_drops;
	u64_stats_t kicks;
};
/* Ethtool-only stats: no queue-stats counterpart (qstat_offset = -1). */
#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
/* Stats mirrored into the core netdev queue-stats API. */
#define VIRTNET_SQ_STAT_QSTAT(name, m) \
	{ \
		name, \
		offsetof(struct virtnet_sq_stats, m), \
		offsetof(struct netdev_queue_stats_tx, m), \
	}
#define VIRTNET_RQ_STAT_QSTAT(name, m) \
	{ \
		name, \
		offsetof(struct virtnet_rq_stats, m), \
		offsetof(struct netdev_queue_stats_rx, m), \
	}
/* Driver-maintained per-queue stats exposed via ethtool -S. */
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	VIRTNET_SQ_STAT("xdp_tx" , xdp_tx),
	VIRTNET_SQ_STAT("xdp_tx_drops" , xdp_tx_drops),
	VIRTNET_SQ_STAT("kicks" , kicks),
	VIRTNET_SQ_STAT("tx_timeouts" , tx_timeouts),
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	VIRTNET_RQ_STAT("drops" , drops),
	VIRTNET_RQ_STAT("xdp_packets" , xdp_packets),
	VIRTNET_RQ_STAT("xdp_tx" , xdp_tx),
	VIRTNET_RQ_STAT("xdp_redirects" , xdp_redirects),
	VIRTNET_RQ_STAT("xdp_drops" , xdp_drops),
	VIRTNET_RQ_STAT("kicks" , kicks),
};
/* Driver stats also reported through the netdev queue-stats API. */
static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
	VIRTNET_SQ_STAT_QSTAT("packets" , packets),
	VIRTNET_SQ_STAT_QSTAT("bytes" , bytes),
	VIRTNET_SQ_STAT_QSTAT("stop" , stop),
	VIRTNET_SQ_STAT_QSTAT("wake" , wake),
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
	VIRTNET_RQ_STAT_QSTAT("packets" , packets),
	VIRTNET_RQ_STAT_QSTAT("bytes" , bytes),
};
/* Device-supplied statistics (VIRTIO_NET_F_DEVICE_STATS), read from the
 * device's stats virtio structures.
 */
#define VIRTNET_STATS_DESC_CQ(name) \
	{#name , offsetof(struct virtio_net_stats_cvq, name), -1}
#define VIRTNET_STATS_DESC_RX(class , name) \
	{#name , offsetof(struct virtio_net_stats_rx_ ## class , rx_ ## name), -1}
#define VIRTNET_STATS_DESC_TX(class , name) \
	{#name , offsetof(struct virtio_net_stats_tx_ ## class , tx_ ## name), -1}
static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
	VIRTNET_STATS_DESC_CQ(command_num),
	VIRTNET_STATS_DESC_CQ(ok_num),
};
static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
	VIRTNET_STATS_DESC_RX(basic, packets),
	VIRTNET_STATS_DESC_RX(basic, bytes),
	VIRTNET_STATS_DESC_RX(basic, notifications),
	VIRTNET_STATS_DESC_RX(basic, interrupts),
};
static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
	VIRTNET_STATS_DESC_TX(basic, packets),
	VIRTNET_STATS_DESC_TX(basic, bytes),
	VIRTNET_STATS_DESC_TX(basic, notifications),
	VIRTNET_STATS_DESC_TX(basic, interrupts),
};
static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
	VIRTNET_STATS_DESC_RX(csum, needs_csum),
};
static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
	VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
	VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
};
static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
	VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
};
static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
	VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
};
/* Device stats that map onto a named netdev queue-stat field. */
#define VIRTNET_STATS_DESC_RX_QSTAT(class , name, qstat_field) \
	{ \
		#name , \
		offsetof(struct virtio_net_stats_rx_ ## class , rx_ ## name), \
		offsetof(struct netdev_queue_stats_rx, qstat_field), \
	}
#define VIRTNET_STATS_DESC_TX_QSTAT(class , name, qstat_field) \
	{ \
		#name , \
		offsetof(struct virtio_net_stats_tx_ ## class , tx_ ## name), \
		offsetof(struct netdev_queue_stats_tx, qstat_field), \
	}
static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops),
	VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
};
static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(basic, drops, hw_drops),
	VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
};
static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none),
	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad),
};
static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none),
	VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
};
static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets),
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes),
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes),
};
static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets),
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes),
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets),
	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
};
static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
	VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};
static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
	VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};
/* Virtqueue classes as returned by vq_type(). */
#define VIRTNET_Q_TYPE_RX 0
#define VIRTNET_Q_TYPE_TX 1
#define VIRTNET_Q_TYPE_CQ 2
/* One interrupt-coalescing setting pair (packet and time thresholds). */
struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};
/* The dma information of pages allocated at a time. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	/* Number of outstanding rx buffers carved from this mapping. */
	u32 ref;
	u16 len;
	/* Nonzero when CPU syncs are required before reading the data. */
	u16 need_sync;
};
/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send _queue */
	struct virtqueue *vq;
	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	/* Name of the send queue: output.$index */
	char name[16];
	/* Per-queue tx counters. */
	struct virtnet_sq_stats stats;
	/* Per-queue tx interrupt coalescing settings. */
	struct virtnet_interrupt_coalesce intr_coal;
	struct napi_struct napi;
	/* Record whether sq is in reset state. */
	bool reset;
	/* Non-NULL while an AF_XDP socket is bound to this queue. */
	struct xsk_buff_pool *xsk_pool;
	/* DMA address of the shared virtio header used for XSK tx. */
	dma_addr_t xsk_hdr_dma_addr;
};
/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;
	struct napi_struct napi;
	/* Attached XDP program, if any (RCU-protected). */
	struct bpf_prog __rcu *xdp_prog;
	/* Per-queue rx counters. */
	struct virtnet_rq_stats stats;
	/* The number of rx notifications */
	u16 calls;
	/* Is dynamic interrupt moderation enabled? */
	bool dim_enabled;
	/* Used to protect dim_enabled and inter_coal */
	struct mutex dim_lock;
	/* Dynamic Interrupt Moderation */
	struct dim dim;
	u32 packets_in_napi;
	struct virtnet_interrupt_coalesce intr_coal;
	/* Chain pages by the private ptr. */
	struct page *pages;
	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;
	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;
	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;
	/* Name of this receive queue: input.$index */
	char name[16];
	struct xdp_rxq_info xdp_rxq;
	/* Record the last dma info to free after new pages is allocated. */
	struct virtnet_rq_dma *last_dma;
	/* Non-NULL while an AF_XDP socket is bound to this queue. */
	struct xsk_buff_pool *xsk_pool;
	/* xdp rxq used by xsk */
	struct xdp_rxq_info xsk_rxq_info;
	struct xdp_buff **xsk_buffs;
};
/* Upper bound on the RSS hash key length the driver supports. */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	/* Device-written command completion status. */
	virtio_net_ctrl_ack status;
};
/* Per-device driver state. */
struct virtnet_info {
	struct virtio_device *vdev;
	/* Control virtqueue, or NULL when !has_cvq. */
	struct virtqueue *cvq;
	struct net_device *dev;
	/* Arrays of per-queue state, max_queue_pairs entries each. */
	struct send_queue *sq;
	struct receive_queue *rq;
	/* Cached VIRTIO_NET_S_* link status bits. */
	unsigned int status;
	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;
	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;
	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;
	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
	bool xdp_enabled;
	/* I like... big packets and I cannot lie! */
	bool big_packets;
	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;
	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;
	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;
	/* RSS command payload: header + indirection table, then trailer. */
	struct virtio_net_rss_config_hdr *rss_hdr;
	struct virtio_net_rss_config_trailer rss_trailer;
	u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
	/* Has control virtqueue */
	bool has_cvq;
	/* Lock to protect the control VQ */
	struct mutex cvq_lock;
	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;
	/* Packet virtio header size */
	u8 hdr_len;
	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;
	/* UDP tunnel support */
	bool tx_tnl;
	bool rx_tnl;
	bool rx_tnl_csum;
	/* Is delayed refill enabled? */
	bool refill_enabled;
	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;
	/* Work struct for config space updates */
	struct work_struct config_work;
	/* Work struct for setting rx mode */
	struct work_struct rx_mode_work;
	/* OK to queue work setting RX mode? */
	bool rx_mode_work_enabled;
	/* Does the affinity hint is set for virtqueues? */
	bool affinity_hint_set;
	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;
	/* Control-vq command buffer (see struct control_buf). */
	struct control_buf *ctrl;
	/* Ethtool settings */
	u8 duplex;
	u32 speed;
	/* Is rx dynamic interrupt moderation enabled? */
	bool rx_dim_enabled;
	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;
	/* Currently enabled / device-capable guest offload bitmaps. */
	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;
	/* failover when STANDBY feature enabled */
	struct failover *failover;
	/* Bitmap of device statistics types the device can report. */
	u64 device_stats_cap;
};
struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};
/* Union of every virtio-net header layout the device may present; large
 * enough for the biggest variant, overlaid on skb->cb on receive.
 */
struct virtio_net_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
		struct virtio_net_hdr_v1_hash_tunnel tnl_hdr;
	};
};
/* Zero-initialized shared header; presumably the target of each queue's
 * xsk_hdr_dma_addr mapping for XSK tx — confirm against the XSK bind path.
 */
static struct virtio_net_common_hdr xsk_hdr;
/* Forward declarations for the receive/transmit helpers defined below. */
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats);
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
				 struct sk_buff *skb, u8 flags);
static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
					       struct sk_buff *curr_skb,
					       struct page *page, void *buf,
					       int len, int truesize);
static void virtnet_xsk_completed(struct send_queue *sq, int num);
/* Tag stored in the low two bits of each tx token; see
 * VIRTNET_XMIT_TYPE_MASK and virtnet_xmit_ptr_pack()/unpack().
 */
enum virtnet_xmit_type {
	VIRTNET_XMIT_TYPE_SKB,
	VIRTNET_XMIT_TYPE_SKB_ORPHAN,
	VIRTNET_XMIT_TYPE_XDP,
	VIRTNET_XMIT_TYPE_XSK,
};
/* Size in bytes of the RSS config header including its indirection
 * table; a single table entry is used when RSS is not supported.
 */
static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
{
	u16 entries = 1;

	if (vi->has_rss)
		entries = vi->rss_indir_table_size;

	return struct_size(vi->rss_hdr, indirection_table, entries);
}
static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
{
return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
}
/* We use the last two bits of the pointer to distinguish the xmit type. */
#define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))
#define VIRTIO_XSK_FLAG_OFFSET 2
static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
{
unsigned long p = (unsigned long )*ptr;
*ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
return p & VIRTNET_XMIT_TYPE_MASK;
}
/* Tag @ptr with @type in its low bits; relies on tx tokens being at
 * least 4-byte aligned.
 */
static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
{
	unsigned long tagged = (unsigned long)ptr;

	tagged |= type;
	return (void *)tagged;
}
/* Queue @num tx scatterlist entries, using @data tagged with @type as
 * the completion token.
 */
static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
			      enum virtnet_xmit_type type)
{
	void *token = virtnet_xmit_ptr_pack(data, type);

	return virtqueue_add_outbuf(sq->vq, sq->sg, num, token, GFP_ATOMIC);
}
/* XSK tx tokens carry the buffer length in the bits above the type tag. */
static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
{
	unsigned long token = (unsigned long)ptr;

	return token >> VIRTIO_XSK_FLAG_OFFSET;
}
/* Populate a scatterlist entry directly with a premapped DMA address. */
static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
{
	sg_dma_len(sg) = len;
	sg_dma_address(sg) = addr;
}
/* Reclaim all completed tx buffers from @sq, dispatching on the xmit
 * type tag packed into each token's low bits, and accumulate the
 * results into @stats. Also reports BQL completion for the skb path.
 */
static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
			    bool in_napi, struct virtnet_sq_free_stats *stats)
{
	struct xdp_frame *frame;
	struct sk_buff *skb;
	unsigned int len;
	void *ptr;
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		switch (virtnet_xmit_ptr_unpack(&ptr)) {
		case VIRTNET_XMIT_TYPE_SKB:
			skb = ptr;
			pr_debug("Sent skb %p\n" , skb);
			/* Counted separately: only this path participates in
			 * the netdev_tx_completed_queue() accounting below.
			 */
			stats->napi_packets++;
			stats->napi_bytes += skb->len;
			napi_consume_skb(skb, in_napi);
			break ;
		case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
			skb = ptr;
			stats->packets++;
			stats->bytes += skb->len;
			napi_consume_skb(skb, in_napi);
			break ;
		case VIRTNET_XMIT_TYPE_XDP:
			frame = ptr;
			stats->packets++;
			stats->bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
			break ;
		case VIRTNET_XMIT_TYPE_XSK:
			/* The token encodes the buffer length, not a pointer. */
			stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
			stats->xsk++;
			break ;
		}
	}
	netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
}
/* Reclaim completed tx buffers and, if any XSK completions were seen,
 * notify the XSK completion path.
 */
static void virtnet_free_old_xmit(struct send_queue *sq,
				  struct netdev_queue *txq,
				  bool in_napi,
				  struct virtnet_sq_free_stats *stats)
{
	__free_old_xmit(sq, txq, in_napi, stats);

	if (!stats->xsk)
		return;

	virtnet_xsk_completed(sq, stats->xsk);
}
/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	int idx = vq->index;

	return (idx - 1) / 2;
}
/* tx queue n lives at virtqueue slot 2n + 1. */
static int txq2vq(int txq)
{
	int vq = txq + txq;

	return vq + 1;
}
/* rx virtqueues occupy the even slots; recover the rx queue index. */
static int vq2rxq(struct virtqueue *vq)
{
	int idx = vq->index;

	return idx / 2;
}
/* rx queue n lives at virtqueue slot 2n. */
static int rxq2vq(int rxq)
{
	return rxq + rxq;
}
/* Classify virtqueue @qid: the slot after all rx/tx pairs is the control
 * queue; otherwise odd slots are tx and even slots are rx.
 */
static int vq_type(struct virtnet_info *vi, int qid)
{
	if (qid == vi->max_queue_pairs * 2)
		return VIRTNET_Q_TYPE_CQ;

	return (qid & 1) ? VIRTNET_Q_TYPE_TX : VIRTNET_Q_TYPE_RX;
}
/* The virtio-net header for a received skb is stashed in skb->cb (see
 * page_to_skb(), which memcpy()s it there).
 */
static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_common_hdr *)skb->cb;
}
/*
 * private is used to chain pages for big packets, put the whole
 * most recent used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *tail = page;

	/* Walk to the end of the new chain, then splice the existing
	 * rq->pages list behind it.
	 */
	while (tail->private)
		tail = (struct page *)tail->private;

	tail->private = (unsigned long)rq->pages;
	rq->pages = page;
}
/* Take a page from the queue's reuse chain, falling back to a fresh
 * allocation when the chain is empty.
 */
static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (!p)
		return alloc_page(gfp_mask);

	rq->pages = (struct page *)p->private;
	/* clear private here, it is used to chain pages */
	p->private = 0;
	return p;
}
/* Release an rx buffer: big-packet mode (without mergeable buffers)
 * returns the page chain to the queue; every other mode drops a single
 * page reference.
 */
static void virtnet_rq_free_buf(struct virtnet_info *vi,
				struct receive_queue *rq, void *buf)
{
	if (vi->big_packets && !vi->mergeable_rx_bufs)
		give_pages(rq, buf);
	else
		put_page(virt_to_head_page(buf));
}
/* Allow the rx paths to schedule the delayed refill work. */
static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true ;
	spin_unlock_bh(&vi->refill_lock);
}
/* Forbid scheduling of the delayed refill work (e.g. around teardown). */
static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false ;
	spin_unlock_bh(&vi->refill_lock);
}
/* Allow queuing of the rx-mode work; the flag is protected by rtnl. */
static void enable_rx_mode_work(struct virtnet_info *vi)
{
	rtnl_lock();
	vi->rx_mode_work_enabled = true ;
	rtnl_unlock();
}
/* Forbid queuing of the rx-mode work; the flag is protected by rtnl. */
static void disable_rx_mode_work(struct virtnet_info *vi)
{
	rtnl_lock();
	vi->rx_mode_work_enabled = false ;
	rtnl_unlock();
}
/* Schedule NAPI for @vq, suppressing further virtqueue callbacks only
 * when we actually won the schedule (napi_schedule_prep() fails if NAPI
 * is already scheduled or running).
 */
static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}
/* Complete a NAPI poll for @vq. Returns true only when NAPI was
 * completed and no further work surfaced after callbacks were re-armed.
 */
static bool virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;
	/* Re-arm callbacks before completing NAPI so that work arriving
	 * in the window can still be detected via virtqueue_poll().
	 */
	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		/* Work raced in after re-arming: poll again. */
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
		else
			return true ;
	} else {
		/* NAPI stays scheduled; keep callbacks suppressed. */
		virtqueue_disable_cb(vq);
	}
	return false ;
}
/* tx virtqueue callback: hand completion work to tx NAPI when enabled,
 * otherwise just wake the (possibly stopped) tx queue.
 */
static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);
	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}
/* Mergeable-buffer context: truesize in the low 22 bits, headroom above. */
#define MRG_CTX_HEADER_SHIFT 22

/* Encode @truesize and @headroom into an opaque context pointer. */
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	unsigned int ctx = headroom << MRG_CTX_HEADER_SHIFT;

	ctx |= truesize;
	return (void *)(unsigned long)ctx;
}

/* Recover the headroom encoded in a mergeable-buffer context. */
static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	unsigned long ctx = (unsigned long)mrg_ctx;

	return ctx >> MRG_CTX_HEADER_SHIFT;
}

/* Recover the truesize encoded in a mergeable-buffer context. */
static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	unsigned long ctx = (unsigned long)mrg_ctx;

	return ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
/* Validate the device-reported @len against the buffer's usable size
 * (truesize minus headroom/tailroom). Returns 0 if sane, -1 (after
 * bumping rx_length_errors) for a buggy or malicious device.
 */
static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
			       unsigned int len)
{
	unsigned int headroom, tailroom, room, truesize;
	truesize = mergeable_ctx_to_truesize(mrg_ctx);
	headroom = mergeable_ctx_to_headroom(mrg_ctx);
	/* Nonzero headroom implies an XDP-capable buffer, which also
	 * reserves tail space for skb_shared_info.
	 */
	tailroom = headroom ? sizeof (struct skb_shared_info) : 0;
	room = SKB_DATA_ALIGN(headroom + tailroom);
	if (len > truesize - room) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n" ,
			 dev->name, len, (unsigned long )(truesize - room));
		DEV_STATS_INC(dev, rx_length_errors);
		return -1;
	}
	return 0;
}
/* Wrap an existing buffer in an skb, reserving @headroom and exposing
 * @len bytes of data. Returns NULL on allocation failure.
 */
static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *nskb = build_skb(buf, buflen);

	if (unlikely(!nskb))
		return NULL;

	skb_reserve(nskb, headroom);
	skb_put(nskb, len);

	return nskb;
}
/* Called from bottom half context */
/* Build an skb from a received page (possibly a chain of pages in
 * big-packets mode). Either wraps the page directly via build_skb()
 * when there is room for skb_shared_info, or copies the head into a
 * freshly allocated skb and attaches the remainder as frags. The
 * virtio header is preserved in skb->cb. Returns NULL on failure.
 */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;
	p = page_address(page) + offset;
	/* Remember where the virtio header sits before advancing past it. */
	hdr_p = p;
	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof (struct padded_vnet_hdr);
	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;
	shinfo_size = SKB_DATA_ALIGN(sizeof (struct skb_shared_info));
	/* Zero-copy path: hand the page buffer to build_skb() when the
	 * packet is large enough to be worth it and tailroom allows.
	 */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;
		/* Return any chained follow-up pages for reuse. */
		page = (struct page *)page->private ;
		if (page)
			give_pages(rq, page);
		goto ok;
	}
	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;
	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);
	len -= copy;
	offset += copy;
	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			/* Fully copied: drop the page ref after stashing hdr. */
			page_to_free = page;
		goto ok;
	}
	/* Big-packets mode: attach the rest of the page chain as frags. */
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned )PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private ;
		offset = 0;
	}
	if (page)
		give_pages(rq, page);
ok:
	/* Preserve the virtio header in skb->cb for later processing. */
	hdr = skb_vnet_common_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);
	return skb;
}
/* Drop one reference on the premapped DMA region backing @buf, syncing
 * @len bytes for the CPU when required. The region is unmapped and its
 * page released once the last reference is gone.
 */
static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma;
	void *head;
	int offset;
	/* Premapped rx buffers are not used in big-packets mode. */
	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
	/* The dma record lives at the start of the page. */
	head = page_address(page);
	dma = head;
	--dma->ref;
	if (dma->need_sync && len) {
		offset = buf - (head + sizeof (*dma));
		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
							offset, len,
							DMA_FROM_DEVICE);
	}
	if (dma->ref)
		return ;
	/* Last user gone: unmap the whole region and drop its page ref. */
	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}
/* Fetch the next used rx buffer from the virtqueue and release its DMA
 * reference (syncing the received bytes for the CPU).
 */
static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;

	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);

	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
	if (!buf)
		return NULL;

	virtnet_rq_unmap(rq, buf, *len);

	return buf;
}
/* Set up rq->sg[0] with the premapped DMA address corresponding to
 * @buf (which lies inside the queue's current alloc_frag page).
 */
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_dma *dma;
	dma_addr_t addr;
	u32 offset;
	void *head;
	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
	head = page_address(rq->alloc_frag.page);
	offset = buf - head;
	/* dma->addr maps the area just past the dma record at the page
	 * start, so subtract the record size when translating @offset.
	 */
	dma = head;
	addr = dma->addr - sizeof (*dma) + offset;
	sg_init_table(rq->sg, 1);
	sg_fill_dma(rq->sg, addr, len);
}
/* Carve a @size-byte rx buffer out of the queue's page frag. On a fresh
 * page, a virtnet_rq_dma record is placed at the page start and the
 * remainder of the page is DMA-mapped once; each buffer carved from the
 * page takes a reference on that record. Returns NULL on mapping failure.
 */
static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_dma *dma;
	void *buf, *head;
	dma_addr_t addr;
	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
	head = page_address(alloc_frag->page);
	dma = head;
	/* new pages */
	if (!alloc_frag->offset) {
		if (rq->last_dma) {
			/* Now, the new page is allocated, the last dma
			 * will not be used. So the dma can be unmapped
			 * if the ref is 0.
			 */
			virtnet_rq_unmap(rq, rq->last_dma, 0);
			rq->last_dma = NULL;
		}
		/* Map everything after the dma record in one shot. */
		dma->len = alloc_frag->size - sizeof (*dma);
		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
						      dma->len, DMA_FROM_DEVICE, 0);
		if (virtqueue_dma_mapping_error(rq->vq, addr))
			return NULL;
		dma->addr = addr;
		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
		/* Add a reference to dma to prevent the entire dma from
		 * being released during error handling. This reference
		 * will be freed after the pages are no longer used.
		 */
		get_page(alloc_frag->page);
		dma->ref = 1;
		alloc_frag->offset = sizeof (*dma);
		rq->last_dma = dma;
	}
	/* One dma ref and one page ref per carved buffer. */
	++dma->ref;
	buf = head + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += size;
	return buf;
}
/* Free an unused rx buffer reclaimed from @vq, taking the right path
 * for XSK, premapped, and big-packet buffers.
 */
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(vq)];

	/* XSK buffers go straight back to the pool. */
	if (rq->xsk_pool) {
		xsk_buff_free((struct xdp_buff *)buf);
		return;
	}

	/* Premapped modes must drop the DMA reference first. */
	if (!vi->big_packets || vi->mergeable_rx_bufs)
		virtnet_rq_unmap(rq, buf, 0);

	virtnet_rq_free_buf(vi, rq, buf);
}
/* Reclaim completed tx buffers and fold the totals into the queue's
 * u64 stats.
 */
static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
			  bool in_napi)
{
	struct virtnet_sq_free_stats stats = {0};
	virtnet_free_old_xmit(sq, txq, in_napi, &stats);
	/* Avoid overhead when no packets have been processed
	 * happens when called speculatively from start_xmit.
	 */
	if (!stats.packets && !stats.napi_packets)
		return ;
	/* Both the BQL-tracked and the orphaned completions count here. */
	u64_stats_update_begin(&sq->stats.syncp);
	u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
	u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
	u64_stats_update_end(&sq->stats.syncp);
}
/* XDP tx queues occupy the tail of the currently used queue range;
 * report whether @q falls inside it.
 */
static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	int first_xdp_q = vi->curr_queue_pairs - vi->xdp_queue_pairs;

	return q >= first_xdp_q && q < vi->curr_queue_pairs;
}
/* Stop the tx queue when the ring cannot fit a worst-case skb (all
 * frags + linear part + header). Returns true when the queue was
 * stopped.
 */
static bool tx_may_stop(struct virtnet_info *vi,
			struct net_device *dev,
			struct send_queue *sq)
{
	int qnum;
	qnum = sq - vi->sq;
	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&sq->stats.syncp);
		u64_stats_inc(&sq->stats.stop);
		u64_stats_update_end(&sq->stats.syncp);
		return true ;
	}
	return false ;
}
/* After queueing a packet, stop the queue if it is nearly full and
 * arrange to be told about completions: via NAPI when tx NAPI is on,
 * otherwise by re-checking after enabling delayed callbacks.
 */
static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;
	qnum = sq - vi->sq;
	if (tx_may_stop(vi, dev, sq)) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
		if (use_napi) {
			/* Callback enable failed: work is already pending,
			 * so poll it from NAPI instead.
			 */
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit(sq, txq, false );
			if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
				netif_start_subqueue(dev, qnum);
				u64_stats_update_begin(&sq->stats.syncp);
				u64_stats_inc(&sq->stats.wake);
				u64_stats_update_end(&sq->stats.syncp);
				/* Queue restarted: interrupts not needed. */
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}
/* Note that @len is the length of received data without virtio header */
/* Validate @len against the XSK frame size and prepare the xdp_buff for
 * use; frees the buffer and returns NULL on a length error.
 */
static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
				   struct receive_queue *rq, void *buf,
				   u32 len, bool first_buf)
{
	struct xdp_buff *xdp;
	u32 bufsize;
	xdp = (struct xdp_buff *)buf;
	/* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for
	 * virtio header and ask the vhost to fill data from
	 * hard_start + XDP_PACKET_HEADROOM - vi->hdr_len
	 * The first buffer has virtio header so the remaining region for frame
	 * data is
	 * xsk_pool_get_rx_frame_size()
	 * While other buffers than the first one do not have virtio header, so
	 * the maximum frame data's length can be
	 * xsk_pool_get_rx_frame_size() + vi->hdr_len
	 */
	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
	if (!first_buf)
		bufsize += vi->hdr_len;
	if (unlikely(len > bufsize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %u\n" ,
			 vi->dev->name, len, bufsize);
		DEV_STATS_INC(vi->dev, rx_length_errors);
		xsk_buff_free(xdp);
		return NULL;
	}
	if (first_buf) {
		xsk_buff_set_size(xdp, len);
	} else {
		/* Follow-up buffers need their data pointers re-derived
		 * since they carry no virtio header.
		 */
		xdp_prepare_buff(xdp, xdp->data_hard_start,
				 XDP_PACKET_HEADROOM - vi->hdr_len, len, 1);
		xdp->flags = 0;
	}
	xsk_buff_dma_sync_for_cpu(xdp);
	return xdp;
}
/* Copy an XSK xdp_buff (including metadata) into a freshly allocated
 * skb and release the pool buffer. Returns NULL on allocation failure
 * (the xdp_buff is freed either way).
 */
static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
					 struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;
	unsigned int size;
	size = xdp->data_end - xdp->data_hard_start;
	skb = napi_alloc_skb(&rq->napi, size);
	if (unlikely(!skb)) {
		xsk_buff_free(xdp);
		return NULL;
	}
	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
	/* Copy metadata + payload in one go, then mark the metadata. */
	size = xdp->data_end - xdp->data_meta;
	memcpy(__skb_put(skb, size), xdp->data_meta, size);
	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}
	xsk_buff_free(xdp);
	return skb;
}
/* Receive a single-buffer XSK packet: run the XDP program (if any) and
 * build an skb on XDP_PASS. Returns NULL when the frame was consumed
 * by XDP (TX/REDIRECT) or dropped.
 */
static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
						 struct receive_queue *rq, struct xdp_buff *xdp,
						 unsigned int *xdp_xmit,
						 struct virtnet_rq_stats *stats)
{
	struct bpf_prog *prog;
	u32 verdict = XDP_PASS;

	rcu_read_lock();
	prog = rcu_dereference(rq->xdp_prog);
	if (prog)
		verdict = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
	rcu_read_unlock();

	if (verdict == XDP_PASS)
		return xsk_construct_skb(rq, xdp);

	if (verdict == XDP_TX || verdict == XDP_REDIRECT)
		return NULL;

	/* Any other verdict drops the packet. */
	xsk_buff_free(xdp);
	u64_stats_inc(&stats->drops);
	return NULL;
}
/* Drain and free the remaining @num_buf - 1 follow-up buffers of a
 * partially received multi-buffer XSK packet, accounting their bytes.
 * Stops early (with rx_length_errors bumped) if the device delivers
 * fewer buffers than announced.
 */
static void xsk_drop_follow_bufs(struct net_device *dev,
				 struct receive_queue *rq,
				 u32 num_buf,
				 struct virtnet_rq_stats *stats)
{
	struct xdp_buff *xdp;
	u32 len;
	while (num_buf-- > 1) {
		xdp = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!xdp)) {
			/* %u: num_buf is u32, printing it as %d would
			 * misrender large values.
			 */
			pr_debug("%s: rx error: %u buffers missing\n" ,
				 dev->name, num_buf);
			DEV_STATS_INC(dev, rx_length_errors);
			break ;
		}
		u64_stats_add(&stats->bytes, len);
		xsk_buff_free(xdp);
	}
}
/* Append the remaining @num_buf - 1 buffers of a mergeable XSK packet
 * to @head_skb, copying each out of the XSK pool into page frags so the
 * skb can outlive the pool. Returns 0 on success; on failure the
 * remaining device buffers are drained and -EINVAL is returned (the
 * caller frees @head_skb).
 */
static int xsk_append_merge_buffer(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct sk_buff *head_skb,
				   u32 num_buf,
				   struct virtio_net_hdr_mrg_rxbuf *hdr,
				   struct virtnet_rq_stats *stats)
{
	struct sk_buff *curr_skb;
	struct xdp_buff *xdp;
	u32 len, truesize;
	struct page *page;
	void *buf;
	curr_skb = head_skb;
	while (--num_buf) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			/* %u: num_buf is u32, not int. */
			pr_debug("%s: rx error: %u buffers out of %d missing\n" ,
				 vi->dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			DEV_STATS_INC(vi->dev, rx_length_errors);
			return -EINVAL;
		}
		u64_stats_add(&stats->bytes, len);
		xdp = buf_to_xdp(vi, rq, buf, len, false );
		if (!xdp)
			goto err;
		/* Copy out of the XSK umem; the pool buffer is recycled. */
		buf = napi_alloc_frag(len);
		if (!buf) {
			xsk_buff_free(xdp);
			goto err;
		}
		memcpy(buf, xdp->data, len);
		xsk_buff_free(xdp);
		page = virt_to_page(buf);
		truesize = len;
		curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
						   buf, len, truesize);
		if (!curr_skb) {
			put_page(page);
			goto err;
		}
	}
	return 0;
err:
	xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
	return -EINVAL;
}
/* Receive one AF_XDP buffer in mergeable mode.
 *
 * The XDP program (if attached) is only run for single-buffer packets;
 * multi-buffer packets are aborted while XDP is loaded (see TODO).
 * On XDP_PASS the head buffer is converted into an skb and any
 * follow-up buffers are appended.  Returns the skb, or NULL when the
 * packet was consumed by XDP (TX/REDIRECT) or dropped.
 */
static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
struct receive_queue *rq, struct xdp_buff *xdp,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 ret, num_buf;
/* The virtio-net header sits directly before the packet data. */
hdr = xdp->data - vi->hdr_len;
num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
ret = XDP_PASS;
rcu_read_lock();
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
/* TODO: support multi buffer. */
if (num_buf == 1)
ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit,
stats);
else
ret = XDP_ABORTED;
}
rcu_read_unlock();
switch (ret) {
case XDP_PASS:
skb = xsk_construct_skb(rq, xdp);
if (!skb)
goto drop_bufs;
if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
dev_kfree_skb(skb);
goto drop;
}
return skb;
case XDP_TX:
case XDP_REDIRECT:
/* Buffer was consumed by the XDP handler. */
return NULL;
default :
/* drop packet */
xsk_buff_free(xdp);
}
drop_bufs:
xsk_drop_follow_bufs(dev, rq, num_buf, stats);
drop:
u64_stats_inc(&stats->drops);
return NULL;
}
/* Entry point for one received AF_XDP buffer.
 *
 * Strips the virtio-net header from @len, validates the minimum packet
 * size, then dispatches to the small- or mergeable-mode handler.  The
 * header flags are captured before dispatch because the xdp buffer may
 * be consumed (and freed) by the XDP program.
 */
static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
void *buf, u32 len,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb = NULL;
struct xdp_buff *xdp;
u8 flags;
/* len from the device includes the virtio-net header. */
len -= vi->hdr_len;
u64_stats_add(&stats->bytes, len);
xdp = buf_to_xdp(vi, rq, buf, len, true );
if (!xdp)
return ;
if (unlikely(len < ETH_HLEN)) {
pr_debug("%s: short packet %i\n" , dev->name, len);
DEV_STATS_INC(dev, rx_length_errors);
xsk_buff_free(xdp);
return ;
}
/* Save header flags now; xdp may be freed by the handlers below. */
flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
if (!vi->mergeable_rx_bufs)
skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
else
skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
if (skb)
virtnet_receive_done(vi, rq, skb, flags);
}
/* Refill the receive virtqueue with buffers from the XSK pool.
 *
 * Allocates up to vq->num_free buffers in one batch and posts each as a
 * single premapped inbuf.  Returns the number of buffers queued on
 * success, or a negative errno; buffers that were allocated but never
 * queued are returned to the pool on error.
 */
static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
struct xsk_buff_pool *pool, gfp_t gfp)
{
struct xdp_buff **xsk_buffs;
dma_addr_t addr;
int err = 0;
u32 len, i;
int num;
xsk_buffs = rq->xsk_buffs;
num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
if (!num)
return -ENOMEM;
/* Reserve room in front of the frame for the virtio-net header. */
len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
for (i = 0; i < num; ++i) {
/* Use the part of XDP_PACKET_HEADROOM as the virtnet hdr space.
 * We assume XDP_PACKET_HEADROOM is larger than hdr->len.
 * (see function virtnet_xsk_pool_enable)
 */
addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
sg_init_table(rq->sg, 1);
sg_fill_dma(rq->sg, addr, len);
err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
xsk_buffs[i], NULL, gfp);
if (err)
goto err;
}
return num;
err:
/* Free the batch-allocated buffers that were never queued (i..num-1). */
for (; i < num; ++i)
xsk_buff_free(xsk_buffs[i]);
return err;
}
/* Encode an XSK TX descriptor length into an opaque virtqueue token.
 * The length is shifted above the xmit-type flag bits and tagged as
 * VIRTNET_XMIT_TYPE_XSK via virtnet_xmit_ptr_pack().
 */
static void *virtnet_xsk_to_ptr(u32 len)
{
	unsigned long encoded;

	encoded = len << VIRTIO_XSK_FLAG_OFFSET;

	return virtnet_xmit_ptr_pack((void *)encoded, VIRTNET_XMIT_TYPE_XSK);
}
/* Queue one AF_XDP TX descriptor on the send virtqueue.
 *
 * Builds a two-entry scatterlist: the queue's preallocated, premapped
 * virtio-net header followed by the descriptor payload.  The payload
 * length is encoded into the completion token by virtnet_xsk_to_ptr().
 * Returns 0 or a negative errno from virtqueue_add_outbuf_premapped().
 */
static int virtnet_xsk_xmit_one(struct send_queue *sq,
struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{
struct virtnet_info *vi;
dma_addr_t addr;
vi = sq->vq->vdev->priv;
addr = xsk_buff_raw_get_dma(pool, desc->addr);
xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
sg_init_table(sq->sg, 2);
/* sg[0]: shared all-zero virtio header; sg[1]: packet payload. */
sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
sg_fill_dma(sq->sg + 1, addr, desc->len);
return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
virtnet_xsk_to_ptr(desc->len),
GFP_ATOMIC);
}
/* Transmit a batch of descriptors from the XSK TX ring.
 *
 * Takes at most min(budget, free vq slots) descriptors from the pool.
 * If queueing one descriptor fails, the remaining (not yet queued)
 * descriptors are completed back to the pool immediately.  Returns the
 * number of descriptors queued; *kicks is bumped if the device was
 * actually notified.
 */
static int virtnet_xsk_xmit_batch(struct send_queue *sq,
struct xsk_buff_pool *pool,
unsigned int budget,
u64 *kicks)
{
struct xdp_desc *descs = pool->tx_descs;
bool kick = false ;
u32 nb_pkts, i;
int err;
budget = min_t(u32, budget, sq->vq->num_free);
nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
if (!nb_pkts)
return 0;
for (i = 0; i < nb_pkts; i++) {
err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
if (unlikely(err)) {
/* Complete the descriptors we could not queue. */
xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
break ;
}
kick = true ;
}
if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
(*kicks)++;
return i;
}
/* AF_XDP TX poll body: reclaim completed buffers, send a new batch,
 * update stats, and re-arm the pool's need_wakeup flag.  Returns true
 * if any descriptor was sent (i.e. more work may remain).
 */
static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
int budget)
{
struct virtnet_info *vi = sq->vq->vdev->priv;
struct virtnet_sq_free_stats stats = {};
struct net_device *dev = vi->dev;
u64 kicks = 0;
int sent;
/* Avoid waking up NAPI needlessly, so call __free_old_xmit() instead
 * of free_old_xmit().
 */
__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true , &stats);
if (stats.xsk)
xsk_tx_completed(sq->xsk_pool, stats.xsk);
sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
check_sq_full_and_disable(vi, vi->dev, sq);
if (sent) {
struct netdev_queue *txq;
/* Refresh the txq trans timestamp to avoid false TX timeouts. */
txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
txq_trans_cond_update(txq);
}
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_add(&sq->stats.bytes, stats.bytes);
u64_stats_add(&sq->stats.kicks, kicks);
u64_stats_add(&sq->stats.xdp_tx, sent);
u64_stats_update_end(&sq->stats.syncp);
if (xsk_uses_need_wakeup(pool))
xsk_set_tx_need_wakeup(pool);
return sent;
}
/* Kick the TX NAPI for @sq.  If NAPI is already scheduled it is only
 * marked "missed" so it reruns; otherwise it is scheduled under a BH
 * disabled section.
 */
static void xsk_wakeup(struct send_queue *sq)
{
	if (!napi_if_scheduled_mark_missed(&sq->napi)) {
		local_bh_disable();
		virtqueue_napi_schedule(&sq->napi, sq->vq);
		local_bh_enable();
	}
}
/* .ndo_xsk_wakeup implementation: validate device state and queue id,
 * then kick the TX NAPI of the selected send queue.
 */
static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
{
	struct virtnet_info *vi = netdev_priv(dev);

	if (!netif_running(dev))
		return -ENETDOWN;

	if (qid >= vi->curr_queue_pairs)
		return -EINVAL;

	xsk_wakeup(&vi->sq[qid]);

	return 0;
}
/* Report @num completed XSK TX descriptors to the pool, then kick the
 * TX NAPI so any queued work is consumed.
 */
static void virtnet_xsk_completed(struct send_queue *sq, int num)
{
xsk_tx_completed(sq->xsk_pool, num);
/* If this is called by rx poll, start_xmit and xdp xmit we should
 * wakeup the tx napi to consume the xsk tx queue, because the tx
 * interrupt may not be triggered.
 */
xsk_wakeup(sq);
}
/* Queue one xdp_frame for transmission.
 *
 * The virtio-net header is carved out of the frame's headroom (which
 * must be at least hdr_len, else -EOVERFLOW) and zeroed; a scatterlist
 * of the linear part plus any frags is posted to the send virtqueue.
 * Returns 0 on success or -ENOSPC if the vq is full; the caller keeps
 * ownership of the frame on failure.
 */
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct send_queue *sq,
struct xdp_frame *xdpf)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
struct skb_shared_info *shinfo;
u8 nr_frags = 0;
int err, i;
if (unlikely(xdpf->headroom < vi->hdr_len))
return -EOVERFLOW;
if (unlikely(xdp_frame_has_frags(xdpf))) {
shinfo = xdp_get_shared_info_from_frame(xdpf);
nr_frags = shinfo->nr_frags;
}
/* In wrapping function virtnet_xdp_xmit(), we need to free
 * up the pending old buffers, where we need to calculate the
 * position of skb_shared_info in xdp_get_frame_len() and
 * xdp_return_frame(), which will involve to xdpf->data and
 * xdpf->headroom. Therefore, we need to update the value of
 * headroom synchronously here.
 */
xdpf->headroom -= vi->hdr_len;
xdpf->data -= vi->hdr_len;
/* Zero header and leave csum up to XDP layers */
hdr = xdpf->data;
memset(hdr, 0, vi->hdr_len);
xdpf->len += vi->hdr_len;
sg_init_table(sq->sg, nr_frags + 1);
sg_set_buf(sq->sg, xdpf->data, xdpf->len);
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &shinfo->frags[i];
sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
skb_frag_size(frag), skb_frag_off(frag));
}
err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */
return 0;
}
/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
* the current cpu, so it does not need to be locked.
*
* Here we use marco instead of inline functions because we have to deal with
* three issues at the same time: 1. the choice of sq. 2. judge and execute the
* lock/unlock of txq 3. make sparse happy. It is difficult for two inline
* functions to perfectly solve these three problems at the same time.
*/
/* Pick (and lock if shared) the send queue for XDP TX on this CPU.
 * Evaluates to a struct send_queue *; must be paired with
 * virtnet_xdp_put_sq().
 */
#define virtnet_xdp_get_sq(vi) ({ \
int cpu = smp_processor_id(); \
struct netdev_queue *txq; \
typeof(vi) v = (vi); \
unsigned int qp; \
\
if (v->curr_queue_pairs > nr_cpu_ids) { \
qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
qp += cpu; \
txq = netdev_get_tx_queue(v->dev, qp); \
__netif_tx_acquire(txq); \
} else { \
qp = cpu % v->curr_queue_pairs; \
txq = netdev_get_tx_queue(v->dev, qp); \
__netif_tx_lock(txq, cpu); \
} \
v->sq + qp; \
})
/* Counterpart of virtnet_xdp_get_sq(): release or unlock the txq that
 * was acquired there.
 */
#define virtnet_xdp_put_sq(vi, q) { \
struct netdev_queue *txq; \
typeof(vi) v = (vi); \
\
txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
if (v->curr_queue_pairs > nr_cpu_ids) \
__netif_tx_release(txq); \
else \
__netif_tx_unlock(txq); \
}
/* .ndo_xdp_xmit implementation: transmit up to @n xdp_frames.
 *
 * Requires an XDP program to be loaded (proof that XDP TX resources
 * exist).  Reclaims completed buffers first, queues as many frames as
 * fit, and kicks the device if XDP_XMIT_FLUSH is set.  Returns the
 * number of frames accepted, or a negative errno; unaccepted frames
 * remain owned by the caller.
 */
static int virtnet_xdp_xmit(struct net_device *dev,
int n, struct xdp_frame **frames, u32 flags)
{
struct virtnet_info *vi = netdev_priv(dev);
struct virtnet_sq_free_stats stats = {0};
struct receive_queue *rq = vi->rq;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
int nxmit = 0;
int kicks = 0;
int ret;
int i;
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
 * indicate XDP resources have been successfully allocated.
 */
xdp_prog = rcu_access_pointer(rq->xdp_prog);
if (!xdp_prog)
return -ENXIO;
sq = virtnet_xdp_get_sq(vi);
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
ret = -EINVAL;
goto out;
}
/* Free up any pending old buffers before queueing new ones. */
virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
false , &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
/* Stop at the first frame that does not fit. */
if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
break ;
nxmit++;
}
ret = nxmit;
if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
check_sq_full_and_disable(vi, dev, sq);
if (flags & XDP_XMIT_FLUSH) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
kicks = 1;
}
out:
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_add(&sq->stats.bytes, stats.bytes);
u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_add(&sq->stats.xdp_tx, n);
u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
u64_stats_add(&sq->stats.kicks, kicks);
u64_stats_update_end(&sq->stats.syncp);
virtnet_xdp_put_sq(vi, sq);
return ret;
}
/* Drop one page reference for every fragment attached to a
 * multi-buffer xdp_buff.  No-op for single-buffer xdp_buffs.
 */
static void put_xdp_frags(struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	if (!xdp_buff_has_frags(xdp))
		return;

	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++)
		put_page(skb_frag_page(&shinfo->frags[i]));
}
/* Run @xdp_prog on @xdp and act on its verdict.
 *
 * XDP_TX frames are sent via virtnet_xdp_xmit(); XDP_REDIRECT goes
 * through xdp_do_redirect().  *xdp_xmit accumulates VIRTIO_XDP_TX /
 * VIRTIO_XDP_REDIR so the caller can flush once per NAPI run.  Any
 * failure collapses to XDP_DROP, which tells the caller to free the
 * buffer.
 */
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
struct net_device *dev,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
struct xdp_frame *xdpf;
int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
u64_stats_inc(&stats->xdp_packets);
switch (act) {
case XDP_PASS:
return act;
case XDP_TX:
u64_stats_inc(&stats->xdp_tx);
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
netdev_dbg(dev, "convert buff to frame failed for xdp\n" );
return XDP_DROP;
}
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
/* 0 frames accepted: frame was not queued, return it. */
xdp_return_frame_rx_napi(xdpf);
} else if (unlikely(err < 0)) {
trace_xdp_exception(dev, xdp_prog, act);
return XDP_DROP;
}
*xdp_xmit |= VIRTIO_XDP_TX;
return act;
case XDP_REDIRECT:
u64_stats_inc(&stats->xdp_redirects);
err = xdp_do_redirect(dev, xdp, xdp_prog);
if (err)
return XDP_DROP;
*xdp_xmit |= VIRTIO_XDP_REDIR;
return act;
default :
bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
return XDP_DROP;
}
}
/* Headroom required in front of rx buffers: XDP needs
 * XDP_PACKET_HEADROOM, otherwise none.
 */
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	if (vi->xdp_enabled)
		return XDP_PACKET_HEADROOM;

	return 0;
}
/* We copy the packet for XDP in the following cases:
*
* 1) Packet is scattered across multiple rx buffers.
* 2) Headroom space is insufficient.
*
* This is inefficient but it's a temporary condition that
* we hit right after XDP is enabled and until queue is refilled
* with large buffers with sufficient headroom - so it should affect
* at most queue size packets.
* Afterwards, the conditions to enable
* XDP should preclude the underlying device from sending packets
* across multiple buffers (num_buf > 1), and we make sure buffers
* have enough headroom.
*/
/* Copy a (possibly multi-buffer) packet into one newly allocated page
 * at offset @page_off so XDP can run on a linear buffer.
 *
 * Follow-up buffers (*num_buf > 1, mergeable mode only) are drained
 * from the rx queue and appended; *num_buf is decremented as they are
 * consumed.  On return *len is the packet length excluding the
 * XDP_PACKET_HEADROOM already included in @page_off.  Returns the new
 * page, or NULL on failure (the new page is freed; consumed follow-up
 * buffers are released).
 */
static struct page *xdp_linearize_page(struct net_device *dev,
struct receive_queue *rq,
int *num_buf,
struct page *p,
int offset,
int page_off,
unsigned int *len)
{
int tailroom = SKB_DATA_ALIGN(sizeof (struct skb_shared_info));
struct page *page;
/* Must leave room for skb_shared_info after the data. */
if (page_off + *len + tailroom > PAGE_SIZE)
return NULL;
page = alloc_page(GFP_ATOMIC);
if (!page)
return NULL;
memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
page_off += *len;
/* Only mergeable mode can go inside this while loop. In small mode,
 * *num_buf == 1, so it cannot go inside.
 */
while (--*num_buf) {
unsigned int buflen;
void *buf;
void *ctx;
int off;
buf = virtnet_rq_get_buf(rq, &buflen, &ctx);
if (unlikely(!buf))
goto err_buf;
p = virt_to_head_page(buf);
off = buf - page_address(p);
if (check_mergeable_len(dev, ctx, buflen)) {
put_page(p);
goto err_buf;
}
/* guard against a misconfigured or uncooperative backend that
 * is sending packet larger than the MTU.
 */
if ((page_off + buflen + tailroom) > PAGE_SIZE) {
put_page(p);
goto err_buf;
}
memcpy(page_address(page) + page_off,
page_address(p) + off, buflen);
page_off += buflen;
put_page(p);
}
/* Headroom does not contribute to packet length */
*len = page_off - XDP_PACKET_HEADROOM;
return page;
err_buf:
__free_pages(page, 0);
return NULL;
}
/* Build an skb around a small-mode rx buffer without copying the
 * payload, then copy the virtio-net header (which sits after the
 * padding/headroom) into the skb's header area.  Returns NULL if
 * virtnet_build_skb() fails.
 */
static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
					       unsigned int xdp_headroom,
					       void *buf,
					       unsigned int len)
{
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int truesize;
	struct sk_buff *skb;

	truesize = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	skb = virtnet_build_skb(buf, truesize, headroom, len);
	if (unlikely(!skb))
		return NULL;

	memcpy(skb_vnet_common_hdr(skb), buf + header_offset, vi->hdr_len);

	return skb;
}
/* XDP path for small (non-mergeable) receive buffers.
 *
 * GSO or partially-checksummed packets cannot be handed to XDP and are
 * dropped.  If the buffer was prefilled with insufficient headroom
 * (before XDP was attached), it is first relocated into a fresh page
 * via xdp_linearize_page().  On XDP_PASS an skb is built around the
 * buffer; TX/REDIRECT consume the buffer and return NULL.
 */
static struct sk_buff *receive_small_xdp(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
struct bpf_prog *xdp_prog,
void *buf,
unsigned int xdp_headroom,
unsigned int len,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
unsigned int headroom = vi->hdr_len + header_offset;
struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
struct page *page = virt_to_head_page(buf);
struct page *xdp_page;
unsigned int buflen;
struct xdp_buff xdp;
struct sk_buff *skb;
unsigned int metasize = 0;
u32 act;
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
/* Partially checksummed packets must be dropped. */
if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
goto err_xdp;
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof (struct skb_shared_info));
if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
/* Buffer prefilled before XDP attach: relocate it into a
 * page that has the full XDP headroom.
 */
int offset = buf - page_address(page) + header_offset;
unsigned int tlen = len + vi->hdr_len;
int num_buf = 1;
xdp_headroom = virtnet_get_headroom(vi);
header_offset = VIRTNET_RX_PAD + xdp_headroom;
headroom = vi->hdr_len + header_offset;
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof (struct skb_shared_info));
xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
offset, header_offset,
&tlen);
if (!xdp_page)
goto err_xdp;
buf = page_address(xdp_page);
put_page(page);
page = xdp_page;
}
xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
xdp_headroom, len, true );
act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
switch (act) {
case XDP_PASS:
/* Recalculate length in case bpf program changed it */
len = xdp.data_end - xdp.data;
metasize = xdp.data - xdp.data_meta;
break ;
case XDP_TX:
case XDP_REDIRECT:
goto xdp_xmit;
default :
goto err_xdp;
}
skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
if (unlikely(!skb))
goto err;
if (metasize)
skb_metadata_set(skb, metasize);
return skb;
err_xdp:
u64_stats_inc(&stats->xdp_drops);
err:
u64_stats_inc(&stats->drops);
put_page(page);
xdp_xmit:
return NULL;
}
/* Receive one buffer in small (non-mergeable, non-big) mode.
 *
 * Restores the original buffer start (the padding/headroom was skipped
 * when the buffer was posted), validates the length, and dispatches to
 * the XDP path when a program is attached; otherwise builds a plain
 * skb.  Returns NULL on drop (stats updated, page released).
 */
static struct sk_buff *receive_small(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
void *buf, void *ctx,
unsigned int len,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
unsigned int xdp_headroom = (unsigned long )ctx;
struct page *page = virt_to_head_page(buf);
struct sk_buff *skb;
/* We passed the address of virtnet header to virtio-core,
 * so truncate the padding.
 */
buf -= VIRTNET_RX_PAD + xdp_headroom;
len -= vi->hdr_len;
u64_stats_add(&stats->bytes, len);
if (unlikely(len > GOOD_PACKET_LEN)) {
pr_debug("%s: rx error: len %u exceeds max size %d\n" ,
dev->name, len, GOOD_PACKET_LEN);
DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
if (unlikely(vi->xdp_enabled)) {
struct bpf_prog *xdp_prog;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
xdp_headroom, len, xdp_xmit,
stats);
rcu_read_unlock();
return skb;
}
rcu_read_unlock();
}
skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
if (likely(skb))
return skb;
err:
u64_stats_inc(&stats->drops);
put_page(page);
return NULL;
}
/* Receive one packet in big-packet mode, where @buf is the head of a
 * chained page list sized for the maximum GSO packet.  Builds an skb
 * over the pages; on any failure the pages go back to the rq pool.
 */
static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
void *buf,
unsigned int len,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
struct sk_buff *skb;
/* Make sure that len does not exceed the size allocated in
 * add_recvbuf_big.
 */
if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
pr_debug("%s: rx error: len %u exceeds allocated size %lu\n" ,
dev->name, len,
(vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
goto err;
}
skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
/* Byte stats count the payload only, not the virtio header. */
u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (unlikely(!skb))
goto err;
return skb;
err:
u64_stats_inc(&stats->drops);
give_pages(rq, page);
return NULL;
}
static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page;
void *buf;
int len;
while (num_buf-- > 1) {
buf = virtnet_rq_get_buf(rq, &len, NULL);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n" ,
dev->name, num_buf);
DEV_STATS_INC(dev, rx_length_errors);
break ;
}
u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
put_page(page);
}
}
/* Why not use xdp_build_skb_from_frame() ?
* XDP core assumes that xdp frags are PAGE_SIZE in length, while in
* virtio-net there are 2 points that do not match its requirements:
* 1. The size of the prefilled buffer is not fixed before xdp is set.
* 2. xdp_build_skb_from_frame() does more checks that we don't need,
* like eth_type_trans() (which virtio-net does in receive_buf()).
*/
/* Build an skb directly over an xdp_buff's backing memory (no copy),
 * transferring metadata and any fragments.  Returns NULL if the XDP
 * program overran the reserved tailroom or build_skb() fails.
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
struct virtnet_info *vi,
struct xdp_buff *xdp,
unsigned int xdp_frags_truesz)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int headroom, data_len;
struct sk_buff *skb;
int metasize;
u8 nr_frags;
if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
pr_debug("Error building skb as missing reserved tailroom for xdp" );
return NULL;
}
/* nr_frags is only read below under the same has-frags condition. */
if (unlikely(xdp_buff_has_frags(xdp)))
nr_frags = sinfo->nr_frags;
skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
return NULL;
headroom = xdp->data - xdp->data_hard_start;
data_len = xdp->data_end - xdp->data;
skb_reserve(skb, headroom);
__skb_put(skb, data_len);
metasize = xdp->data - xdp->data_meta;
metasize = metasize > 0 ? metasize : 0;
if (metasize)
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp)))
xdp_update_skb_shared_info(skb, nr_frags,
sinfo->xdp_frags_size,
xdp_frags_truesz,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
/* TODO: build xdp in big mode */
/* Assemble a (possibly multi-buffer) xdp_buff from a mergeable packet.
 *
 * The head buffer becomes the linear part; the remaining *num_buf - 1
 * buffers are pulled from the rx queue and attached as frags.
 * *num_buf is decremented as buffers are consumed, and
 * *xdp_frags_truesize accumulates the frags' true sizes for later skb
 * accounting.  Returns 0 on success or -EINVAL (frag pages released
 * via put_xdp_frags(); remaining rx buffers are left for the caller to
 * drain).
 */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
struct xdp_buff *xdp,
void *buf,
unsigned int len,
unsigned int frame_sz,
int *num_buf,
unsigned int *xdp_frags_truesize,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
struct skb_shared_info *shinfo;
unsigned int xdp_frags_truesz = 0;
unsigned int truesize;
struct page *page;
skb_frag_t *frag;
int offset;
void *ctx;
xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true );
if (!*num_buf)
return 0;
if (*num_buf > 1) {
/* If we want to build multi-buffer xdp, we need
 * to specify that the flags of xdp_buff have the
 * XDP_FLAGS_HAS_FRAG bit.
 */
if (!xdp_buff_has_frags(xdp))
xdp_buff_set_frags_flag(xdp);
shinfo = xdp_get_shared_info_from_buff(xdp);
shinfo->nr_frags = 0;
shinfo->xdp_frags_size = 0;
}
/* head + at most MAX_SKB_FRAGS fragments. */
if (*num_buf > MAX_SKB_FRAGS + 1)
return -EINVAL;
while (--*num_buf > 0) {
buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n" ,
dev->name, *num_buf,
virtio16_to_cpu(vi->vdev, hdr->num_buffers));
DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
offset = buf - page_address(page);
if (check_mergeable_len(dev, ctx, len)) {
put_page(page);
goto err;
}
truesize = mergeable_ctx_to_truesize(ctx);
xdp_frags_truesz += truesize;
frag = &shinfo->frags[shinfo->nr_frags++];
skb_frag_fill_page_desc(frag, page, offset, len);
if (page_is_pfmemalloc(page))
xdp_buff_set_frag_pfmemalloc(xdp);
shinfo->xdp_frags_size += len;
}
*xdp_frags_truesize = xdp_frags_truesz;
return 0;
err:
put_xdp_frags(xdp);
return -EINVAL;
}
/* Prepare the head buffer of a mergeable packet for XDP.
 *
 * Rejects GSO and partially-checksummed packets (pre-XDP in-flight
 * buffers).  If the prefilled buffer already has enough headroom (and
 * the program supports frags or the packet is single-buffer), it is
 * used in place.  Otherwise the packet is either linearized into a
 * fresh page (non-frag programs) or just the head is copied into a
 * fresh page with full headroom (frag-capable programs).  On the copy
 * paths *page is swapped for the new page (old ref dropped) and
 * *frame_sz becomes PAGE_SIZE.  Returns the start of packet data, or
 * NULL to drop.
 */
static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
struct receive_queue *rq,
struct bpf_prog *xdp_prog,
void *ctx,
unsigned int *frame_sz,
int *num_buf,
struct page **page,
int offset,
unsigned int *len,
struct virtio_net_hdr_mrg_rxbuf *hdr)
{
unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
struct page *xdp_page;
unsigned int xdp_room;
/* Transient failure which in theory could occur if
 * in-flight packets from before XDP was enabled reach
 * the receive path after XDP is loaded.
 */
if (unlikely(hdr->hdr.gso_type))
return NULL;
/* Partially checksummed packets must be dropped. */
if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
return NULL;
/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
 * with headroom may add hole in truesize, which
 * make their length exceed PAGE_SIZE. So we disabled the
 * hole mechanism for xdp. See add_recvbuf_mergeable().
 */
*frame_sz = truesize;
if (likely(headroom >= virtnet_get_headroom(vi) &&
(*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
return page_address(*page) + offset;
}
/* This happens when headroom is not enough because
 * of the buffer was prefilled before XDP is set.
 * This should only happen for the first several packets.
 * In fact, vq reset can be used here to help us clean up
 * the prefilled buffers, but many existing devices do not
 * support it, and we don't want to bother users who are
 * using xdp normally.
 */
if (!xdp_prog->aux->xdp_has_frags) {
/* linearize data for XDP */
xdp_page = xdp_linearize_page(vi->dev, rq, num_buf,
*page, offset,
XDP_PACKET_HEADROOM,
len);
if (!xdp_page)
return NULL;
} else {
xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
sizeof (struct skb_shared_info));
if (*len + xdp_room > PAGE_SIZE)
return NULL;
xdp_page = alloc_page(GFP_ATOMIC);
if (!xdp_page)
return NULL;
memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
page_address(*page) + offset, *len);
}
*frame_sz = PAGE_SIZE;
put_page(*page);
*page = xdp_page;
return page_address(*page) + XDP_PACKET_HEADROOM;
}
/* XDP path for mergeable receive buffers.
 *
 * Gets a suitable head buffer (possibly copied/linearized), builds a
 * multi-buffer xdp_buff, runs the program, and on XDP_PASS wraps the
 * result in an skb without copying.  TX/REDIRECT consume the buffers
 * and return NULL; everything else drops the packet and drains its
 * remaining buffers.
 */
static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
struct bpf_prog *xdp_prog,
void *buf,
void *ctx,
unsigned int len,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
unsigned int xdp_frags_truesz = 0;
struct sk_buff *head_skb;
unsigned int frame_sz;
struct xdp_buff xdp;
void *data;
u32 act;
int err;
data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
offset, &len, hdr);
if (unlikely(!data))
goto err_xdp;
err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
&num_buf, &xdp_frags_truesz, stats);
if (unlikely(err))
goto err_xdp;
act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
switch (act) {
case XDP_PASS:
head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
if (unlikely(!head_skb))
break ;
return head_skb;
case XDP_TX:
case XDP_REDIRECT:
/* Buffers were consumed by the XDP handler. */
return NULL;
default :
break ;
}
/* Drop: release any frag pages attached to the xdp_buff. */
put_xdp_frags(&xdp);
err_xdp:
put_page(page);
mergeable_buf_free(rq, num_buf, dev, stats);
u64_stats_inc(&stats->xdp_drops);
u64_stats_inc(&stats->drops);
return NULL;
}
/* Attach one more rx buffer to a mergeable-mode skb as a page frag.
 *
 * When @curr_skb's frag slots are full a fresh zero-size skb is chained
 * (frag_list for the first overflow, ->next afterwards) and becomes the
 * new current skb.  Adjacent buffers from the same page are coalesced
 * into the previous frag when possible.  Returns the (possibly new)
 * current skb, or NULL on allocation failure; the caller then owns the
 * page reference.
 */
static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
struct sk_buff *curr_skb,
struct page *page, void *buf,
int len, int truesize)
{
int num_skb_frags;
int offset;
num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
if (unlikely(!nskb))
return NULL;
if (curr_skb == head_skb)
skb_shinfo(curr_skb)->frag_list = nskb;
else
curr_skb->next = nskb;
curr_skb = nskb;
head_skb->truesize += nskb->truesize;
num_skb_frags = 0;
}
if (curr_skb != head_skb) {
/* Data lives on a chained skb: account it on the head too. */
head_skb->data_len += len;
head_skb->len += len;
head_skb->truesize += truesize;
}
offset = buf - page_address(page);
if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
/* Contiguous with the last frag: merge and drop the extra ref. */
put_page(page);
skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
len, truesize);
} else {
skb_add_rx_frag(curr_skb, num_skb_frags, page,
offset, len, truesize);
}
return curr_skb;
}
/* Receive one packet in mergeable-buffer mode.
 *
 * Dispatches to the XDP path when a program is attached.  Otherwise
 * builds an skb from the head buffer and appends the remaining
 * hdr->num_buffers - 1 buffers as frags, feeding the EWMA used to size
 * future rx buffers.  On error the packet's remaining buffers are
 * drained and the drop stat incremented.
 */
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
void *buf,
void *ctx,
unsigned int len,
unsigned int *xdp_xmit,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
struct sk_buff *head_skb, *curr_skb;
unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
head_skb = NULL;
u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (check_mergeable_len(dev, ctx, len))
goto err_skb;
if (unlikely(vi->xdp_enabled)) {
struct bpf_prog *xdp_prog;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
len, xdp_xmit, stats);
rcu_read_unlock();
return head_skb;
}
rcu_read_unlock();
}
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
curr_skb = head_skb;
if (unlikely(!curr_skb))
goto err_skb;
while (--num_buf) {
buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n" ,
dev->name, num_buf,
virtio16_to_cpu(vi->vdev,
hdr->num_buffers));
DEV_STATS_INC(dev, rx_length_errors);
goto err_buf;
}
u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
if (check_mergeable_len(dev, ctx, len))
goto err_skb;
truesize = mergeable_ctx_to_truesize(ctx);
curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
buf, len, truesize);
if (!curr_skb)
goto err_skb;
}
/* Feed the average-packet-size estimator used for refills. */
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
return head_skb;
err_skb:
put_page(page);
mergeable_buf_free(rq, num_buf, dev, stats);
err_buf:
u64_stats_inc(&stats->drops);
dev_kfree_skb(head_skb);
return NULL;
}
/* Reassemble the 32-bit RSS hash value from the two little-endian
 * 16-bit halves carried in the v1 hash header.
 */
static inline u32
virtio_net_hash_value(const struct virtio_net_hdr_v1_hash *hdr_hash)
{
	u32 lo = __le16_to_cpu(hdr_hash->hash_value_lo);
	u32 hi = __le16_to_cpu(hdr_hash->hash_value_hi);

	return lo | (hi << 16);
}
static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
struct sk_buff *skb)
{
enum pkt_hash_types rss_hash_type;
if (!hdr_hash || !skb)
return ;
switch (__le16_to_cpu(hdr_hash->hash_report)) {
case VIRTIO_NET_HASH_REPORT_TCPv4:
case VIRTIO_NET_HASH_REPORT_UDPv4:
case VIRTIO_NET_HASH_REPORT_TCPv6:
case VIRTIO_NET_HASH_REPORT_UDPv6:
case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=96 H=96 G=95
¤ Dauer der Verarbeitung: 0.22 Sekunden
¤
*© Formatika GbR, Deutschland