/* NAPI schedule quota */
#define CFV_DEFAULT_QUOTA 32

/* Defaults used if virtio config space is unavailable */
#define CFV_DEF_MTU_SIZE 4096
#define CFV_DEF_HEADROOM 32
#define CFV_DEF_TAILROOM 32

/* Required IP header alignment */
#define IP_HDR_ALIGN 4
/* struct cfv_napi_context - NAPI context info
 * @riov: IOV holding data read from the ring. Note that riov may
 *	  still hold data when cfv_rx_poll() returns.
 * @head: Last descriptor ID we received from vringh_getdesc_kern.
 *	  We use this to put the descriptor back on the used ring.
 *	  USHRT_MAX is used to indicate an invalid head-id.
 */
struct cfv_napi_context {
	struct vringh_kiov riov;
	unsigned short head;
};
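/* The context must start with an empty kiov and an invalid head-id before
 * the first cfv_rx_poll() run. A minimal sketch of that initialization,
 * assuming vringh_kiov_init() from <linux/vringh.h>; the helper name
 * cfv_napi_ctx_init() is hypothetical, the probe path may do this inline.
 */
static void cfv_napi_ctx_init(struct cfv_napi_context *ctx)
{
	/* No iovec data yet; cfv_rx_poll() will fetch the first descriptor */
	vringh_kiov_init(&ctx->riov, NULL, 0);
	/* USHRT_MAX marks "no outstanding head to put on the used ring" */
	ctx->head = USHRT_MAX;
}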
/* struct cfv_stats - statistics for debugfs
 * @rx_napi_complete:	Number of NAPI completions (RX)
 * @rx_napi_resched:	Number of calls where the full quota was used (RX)
 * @rx_nomem:		Number of SKB alloc failures (RX)
 * @rx_kicks:		Number of RX kicks
 * @tx_full_ring:	Number of times the TX ring was full
 * @tx_no_mem:		Number of times TX went out of memory
 * @tx_flow_on:		Number of flow on (TX)
 * @tx_kicks:		Number of TX kicks
 */
struct cfv_stats {
u32 rx_napi_complete;
u32 rx_napi_resched;
u32 rx_nomem;
u32 rx_kicks;
u32 tx_full_ring;
u32 tx_no_mem;
u32 tx_flow_on;
u32 tx_kicks;
};
/* struct cfv_info - CAIF virtio control structure
 * @cfdev:		CAIF common header
 * @vdev:		Associated virtio device
 * @vr_rx:		rx/downlink host vring
 * @vq_tx:		tx/uplink virtqueue
 * @ndev:		CAIF link layer device
 * @watermark_tx:	indicates number of free descriptors we need
 *			to reopen the tx-queues after overload.
 * @tx_lock:		protects vq_tx from concurrent use
 * @tx_release_tasklet:	Tasklet for freeing consumed TX buffers
 * @napi:		NAPI context used in cfv_rx_poll()
 * @ctx:		Context data used in cfv_rx_poll()
 * @tx_hr:		transmit headroom
 * @rx_hr:		receive headroom
 * @tx_tr:		transmit tailroom
 * @rx_tr:		receive tailroom
 * @mtu:		transmit max size
 * @mru:		receive max size
 * @allocsz:		size of dma memory reserved for TX buffers
 * @alloc_addr:		virtual address of dma memory for TX buffers
 * @alloc_dma:		dma address of dma memory for TX buffers
 * @genpool:		gen pool used for allocating TX buffers
 * @reserved_mem:	Pointer to memory reserve allocated from genpool
 * @reserved_size:	Size of memory reserve allocated from genpool
 * @stats:		Statistics exposed in debugfs
 * @debugfs:		Debugfs dentry for statistic counters
 */
struct cfv_info {
	struct caif_dev_common cfdev;
	struct virtio_device *vdev;
	struct vringh *vr_rx;
	struct virtqueue *vq_tx;
	struct net_device *ndev;
	unsigned int watermark_tx;
	/* Protect access to vq_tx */
	spinlock_t tx_lock;
	struct tasklet_struct tx_release_tasklet;
	struct napi_struct napi;
	struct cfv_napi_context ctx;
u16 tx_hr;
u16 rx_hr;
u16 tx_tr;
u16 rx_tr;
u32 mtu;
u32 mru;
	size_t allocsz;
	void *alloc_addr;
	dma_addr_t alloc_dma;
	struct gen_pool *genpool;
	unsigned long reserved_mem;
	size_t reserved_size;
	struct cfv_stats stats;
	struct dentry *debugfs;
};
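/* The @stats counters are exposed through the @debugfs dentry. A minimal
 * sketch, not necessarily the upstream implementation, using one read-only
 * u32 file per counter; the helper name cfv_debugfs_init() and the file
 * names are assumptions.
 */
static void cfv_debugfs_init(struct cfv_info *cfv)
{
	cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL);
	debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
			   &cfv->stats.rx_napi_complete);
	debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
			   &cfv->stats.rx_napi_resched);
	debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
			   &cfv->stats.rx_nomem);
	debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
			   &cfv->stats.rx_kicks);
	debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
			   &cfv->stats.tx_full_ring);
	debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
			   &cfv->stats.tx_no_mem);
	debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
			   &cfv->stats.tx_flow_on);
	debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
			   &cfv->stats.tx_kicks);
}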
/* struct buf_info - maintains transmit buffer data handle
 * @size:	size of transmit buffer
 * @vaddr:	virtual address mapping to allocated memory area
 */
struct buf_info {
size_t size;
u8 *vaddr;
};
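/* free_buf_info() is called throughout this file but its definition is not
 * part of this excerpt. A sketch consistent with how buf_info is used
 * below: return the dma buffer to the genpool and free the handle.
 */
static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
{
	if (!buf_info)
		return;
	gen_pool_free(cfv->genpool, (unsigned long)buf_info->vaddr,
		      buf_info->size);
	kfree(buf_info);
}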
/* Called from virtio device, in IRQ context */
static void cfv_release_cb(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;

	++cfv->stats.tx_kicks;
	tasklet_schedule(&cfv->tx_release_tasklet);
}
/* This is invoked whenever the remote processor completed processing
 * a TX msg we just sent, and the buffer is put back to the used ring.
 */
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;
	unsigned long flags;

	BUG_ON(vq_tx != cfv->vq_tx);
	for (;;) {
		unsigned int len;
		struct buf_info *buf_info;
/* Get used buffer from used ring to recycle used descriptors */
spin_lock_irqsave(&cfv->tx_lock, flags);
buf_info = virtqueue_get_buf(vq_tx, &len);
spin_unlock_irqrestore(&cfv->tx_lock, flags);
		/* Stop looping if there are no more buffers to free */
		if (!buf_info)
			break;
free_buf_info(cfv, buf_info);
		/* watermark_tx indicates if we previously stopped the tx
		 * queues. If we have enough free slots in the virtio ring,
		 * re-establish the memory reserve and open up the tx queues.
		 */
		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
			continue;

		/* Re-establish the memory reserve */
		if (cfv->reserved_mem == 0 && cfv->genpool)
			cfv->reserved_mem =
				gen_pool_alloc(cfv->genpool,
					       cfv->reserved_size);

		/* Open up the tx queues */
		if (cfv->reserved_mem) {
cfv->watermark_tx =
virtqueue_get_vring_size(cfv->vq_tx);
			netif_tx_wake_all_queues(cfv->ndev);
			/* Buffers are recycled in cfv_netdev_tx, so
			 * disable notifications when queues are opened.
			 */
virtqueue_disable_cb(cfv->vq_tx);
++cfv->stats.tx_flow_on;
		} else {
			/* If no memory reserve, wait for more free slots */
WARN_ON(cfv->watermark_tx >
virtqueue_get_vring_size(cfv->vq_tx));
cfv->watermark_tx +=
virtqueue_get_vring_size(cfv->vq_tx) / 4;
}
}
}
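/* cfv_release_cb() above runs in IRQ context and only schedules
 * @tx_release_tasklet; the tasklet body then calls cfv_release_used_buf()
 * in softirq context. A sketch of that glue, assuming the probe path passes
 * the cfv pointer as the tasklet data argument:
 *
 *	tasklet_init(&cfv->tx_release_tasklet,
 *		     cfv_tx_release_tasklet,
 *		     (unsigned long)cfv);
 */
static void cfv_tx_release_tasklet(unsigned long drv)
{
	struct cfv_info *cfv = (struct cfv_info *)drv;

	cfv_release_used_buf(cfv->vq_tx);
}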
/* Allocate a SKB and copy packet data to it */
static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
					      struct cfv_info *cfv,
					      u8 *frm, u32 frm_len)
{
	struct sk_buff *skb;
	u32 cfpkt_len, pad_len;

	*err = 0;
	/* Verify that the frame fits within the MRU and is larger than the
	 * down-link head- and tailroom.
	 */
	if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
netdev_err(cfv->ndev, "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n",
frm_len, cfv->mru, cfv->rx_hr,
cfv->rx_tr);
		*err = -EPROTO;
		return NULL;
	}

	/* Align the SKB so that the IP header following the receive
	 * headroom lands on an IP_HDR_ALIGN boundary.
	 */
	cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
	pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);

	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
	if (!skb) {
		*err = -ENOMEM;
		return NULL;
	}

	skb_reserve(skb, cfv->rx_hr + pad_len);
	skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
	return skb;
}
/* Get packets from the host vring */
static int cfv_rx_poll(struct napi_struct *napi, int quota)
{
	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
	int rxcnt = 0;
	int err = 0;
	void *buf;
	struct sk_buff *skb;
	struct vringh_kiov *riov = &cfv->ctx.riov;
	unsigned int skb_len;
do {
skb = NULL;
		/* Put the previous iovec back on the used ring and
		 * fetch a new iovec if we have processed all elements.
		 */
		if (riov->i == riov->used) {
			if (cfv->ctx.head != USHRT_MAX) {
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
				cfv->ctx.head = USHRT_MAX;
			}

			err = vringh_getdesc_kern(cfv->vr_rx,
						  riov,
						  NULL,
						  &cfv->ctx.head,
						  GFP_ATOMIC);
			if (err <= 0)
				goto exit;
		}

		buf = phys_to_virt((unsigned long)riov->iov[riov->i].iov_base);

		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
		if (unlikely(err))
			goto exit;
/* Push received packet up the stack. */
skb_len = skb->len;
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
skb->dev = cfv->ndev;
		err = netif_receive_skb(skb);
		if (unlikely(err)) {
++cfv->ndev->stats.rx_dropped;
} else {
++cfv->ndev->stats.rx_packets;
cfv->ndev->stats.rx_bytes += skb_len;
}
++riov->i;
++rxcnt;
} while (rxcnt < quota);
	++cfv->stats.rx_napi_resched;
	goto out;

exit:
	switch (err) {
	case 0:
++cfv->stats.rx_napi_complete;
/* Really out of packets? (stolen from virtio_net)*/
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
napi_schedule_prep(napi)) {
vringh_notify_disable_kern(cfv->vr_rx);
__napi_schedule(napi);
		}
		break;
case -ENOMEM:
++cfv->stats.rx_nomem;
		dev_kfree_skb(skb);
		/* Stop NAPI poll on OOM, we hope to be polled later */
napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
		break;

	default:
		/* We're doomed, any modem fault is fatal */
		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
		napi_complete(napi);
		vringh_notify_disable_kern(cfv->vr_rx);
		netif_carrier_off(cfv->ndev);
		break;
	}
out:
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
	return rxcnt;
}
static int cfv_create_genpool(struct cfv_info *cfv)
{
	int err;
	/* dma_alloc can only allocate whole pages, and we need a more
	 * fine-grained allocation so we use genpool. We ask for space needed
	 * by IP and a full ring. If the dma allocation fails we retry with a
	 * smaller allocation size.
	 */
err = -ENOMEM;
cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
		return -EINVAL;
	for (;;) {
		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
			netdev_info(cfv->ndev, "Not enough device memory\n");
			return -ENOMEM;
}
cfv->alloc_addr = dma_alloc_coherent(
cfv->vdev->dev.parent->parent,
cfv->allocsz, &cfv->alloc_dma,
			GFP_ATOMIC);
		if (cfv->alloc_addr)
			break;
cfv->allocsz = (cfv->allocsz * 3) >> 2;
}
netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
cfv->allocsz);
	/* Allocate on 128-byte boundaries (1 << 7) */
	cfv->genpool = gen_pool_create(7, -1);
	if (!cfv->genpool)
		goto err;

	/* Hand the dma area over to the pool */
	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
				cfv->allocsz, -1);
	if (err)
		goto err;
	/* Reserve some memory for low memory situations. If we hit the roof
	 * in the memory pool, we stop TX flow and release the reserve.
	 */
cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
					   cfv->reserved_size);
	if (!cfv->reserved_mem) {
		err = -ENOMEM;
		goto err;
	}

	return 0;
err:
	cfv_destroy_genpool(cfv);
	return err;
}

/* Enable the CAIF interface and allocate the memory-pool */
static int cfv_netdev_open(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);

	if (cfv_create_genpool(cfv))
		return -ENOMEM;

	netif_carrier_on(netdev);
	napi_enable(&cfv->napi);

	/* Schedule NAPI to read any pending packets */
	napi_schedule(&cfv->napi);
	return 0;
}
/* Disable the CAIF interface and free the memory-pool */
static int cfv_netdev_close(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	unsigned long flags;
	struct buf_info *buf_info;
/* Disable interrupts, queues and NAPI polling */
netif_carrier_off(netdev);
virtqueue_disable_cb(cfv->vq_tx);
vringh_notify_disable_kern(cfv->vr_rx);
napi_disable(&cfv->napi);
/* Release any TX buffers on both used and available rings */
cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);
	while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
free_buf_info(cfv, buf_info);
spin_unlock_irqrestore(&cfv->tx_lock, flags);
/* Release all dma allocated memory and destroy the pool */
	cfv_destroy_genpool(cfv);
	return 0;
}
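/* cfv_destroy_genpool() is not part of this excerpt. A sketch that undoes
 * the allocations made in cfv_create_genpool(): give back the memory
 * reserve, destroy the pool and free the coherent dma area.
 */
static void cfv_destroy_genpool(struct cfv_info *cfv)
{
	if (cfv->alloc_addr)
		dma_free_coherent(cfv->vdev->dev.parent->parent,
				  cfv->allocsz, cfv->alloc_addr,
				  cfv->alloc_dma);

	if (!cfv->genpool)
		return;
	gen_pool_free(cfv->genpool, cfv->reserved_mem,
		      cfv->reserved_size);
	gen_pool_destroy(cfv->genpool);
	cfv->genpool = NULL;
}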
/* Allocate a buffer in dma-memory and copy skb to it */
static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
						  struct sk_buff *skb,
						  struct scatterlist *sg)
{
	struct caif_payload_info *info = (void *)&skb->cb;
	struct buf_info *buf_info = NULL;
	u8 pad_len, hdr_ofs;
/* Put the CAIF packet on the virtio ring and kick the receiver */
static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb,
				 struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	struct buf_info *buf_info;
	struct scatterlist sg;
	unsigned long flags;
	bool flow_off = false;
	int ret;
/* garbage collect released buffers */
cfv_release_used_buf(cfv->vq_tx);
spin_lock_irqsave(&cfv->tx_lock, flags);
	/* Flow-off check takes into account number of cpus to make sure
	 * virtqueue will not be overfilled in any possible smp conditions.
	 *
	 * Flow-on is triggered when sufficient buffers are freed.
	 */
	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
flow_off = true;
cfv->stats.tx_full_ring++;
}
	/* If we run out of memory, we release the memory reserve and retry
	 * allocation.
	 */
	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
	if (unlikely(!buf_info)) {
		cfv->stats.tx_no_mem++;
		flow_off = true;

		if (cfv->reserved_mem && cfv->genpool) {
			gen_pool_free(cfv->genpool, cfv->reserved_mem,
				      cfv->reserved_size);
			cfv->reserved_mem = 0;
			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
		}
	}
	if (unlikely(flow_off)) {
		/* Turn flow on when a 1/4 of the descriptors are released */
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
		/* Enable notifications of recycled TX buffers */
		virtqueue_enable_cb(cfv->vq_tx);
		netif_tx_stop_all_queues(netdev);
	}
	if (unlikely(!buf_info)) {
		/* If the memory reserve does its job, this shouldn't happen */
netdev_warn(cfv->ndev, "Out of gen_pool memory\n"); goto err;
}
	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
	if (unlikely((ret < 0))) {
		/* If flow control works, this shouldn't happen */
		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
			    ret);
		goto err;
	}

	/* update netdev statistics */
	cfv->ndev->stats.tx_packets++;
	cfv->ndev->stats.tx_bytes += skb->len;
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(cfv->vq_tx);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
err:
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
	cfv->ndev->stats.tx_dropped++;
	free_buf_info(cfv, buf_info);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
	/* Get the RX virtio ring. This is a "host side vring". */
	err = -ENODEV;
	if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
		goto err;

	err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
	if (err)
		goto err;
	/* Get the TX virtio ring. This is a "guest side vring". */
	cfv->vq_tx = virtio_find_single_vq(vdev, cfv_release_cb, "output");
	if (IS_ERR(cfv->vq_tx)) {
		err = PTR_ERR(cfv->vq_tx);
		goto err;
	}
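/* The find_vrhs() call above registers a vringh callback; its definition is
 * not part of this excerpt. A sketch of an RX kick handler consistent with
 * the rx_kicks counter and the NAPI scheduling in cfv_rx_poll(), assuming
 * vrh_cbs is a probe-local "vrh_callback_t *vrh_cbs = cfv_recv;":
 */
static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
{
	struct cfv_info *cfv = vdev->priv;

	++cfv->stats.rx_kicks;
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_schedule(&cfv->napi);
}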