/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;	/* MMIO register this mgmt block controls */
	unsigned long rate;	/* cached clock rate in Hz -- fixed fused "unsignedlong" token */
	struct clk_hw hw;	/* hook into the common clock framework */
};
/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) \
				 & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	14000
#define MACB_PM_TIMEOUT		100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */
/* DMA buffer descriptor might be different size * depends on hardware configuration: * * 1. dma address width 32 bits: * word 1: 32 bit address of Data Buffer * word 2: control * * 2. dma address width 64 bits: * word 1: 32 bit address of Data Buffer * word 2: control * word 3: upper 32 bit address of Data Buffer * word 4: unused * * 3. dma address width 32 bits with hardware timestamping: * word 1: 32 bit address of Data Buffer * word 2: control * word 3: timestamp word 1 * word 4: timestamp word 2 * * 4. dma address width 64 bits with hardware timestamping: * word 1: 32 bit address of Data Buffer * word 2: control * word 3: upper 32 bit address of Data Buffer * word 4: unused * word 5: timestamp word 1 * word 6: timestamp word 2
*/ staticunsignedint macb_dma_desc_get_size(struct macb *bp)
{ #ifdef MACB_EXT_DESC unsignedint desc_size;
/* Find the CPU endianness by using the loopback bit of NCR register. When the * CPU is in big endian we need to program swapped mode for management * descriptor access.
*/ staticbool hw_is_native_io(void __iomem *addr)
{
u32 value = MACB_BIT(LLB);
__raw_writel(value, addr + MACB_NCR);
value = __raw_readl(addr + MACB_NCR);
/* Write 0 back to disable everything */
__raw_writel(0, addr + MACB_NCR);
/* Check all 4 address register for valid address */ for (i = 0; i < 4; i++) {
bottom = macb_or_gem_readl(bp, SA1B + i * 8);
top = macb_or_gem_readl(bp, SA1T + i * 8);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT /* Single register for all queues' high 32 bits. */ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
macb_writel(bp, RBQPH,
upper_32_bits(bp->queues[0].rx_ring_dma));
macb_writel(bp, TBQPH,
upper_32_bits(bp->queues[0].tx_ring_dma));
} #endif
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @bp: pointer to struct macb
 * @speed: New link speed, used to look up the required tx_clk rate
 */
static void macb_set_tx_clk(struct macb *bp, int speed)
{
	long ferr, rate, rate_rounded;

	/* Nothing to do if there is no tx_clk, or if hardware changes
	 * the clock rate on its own.
	 */
	if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
		return;

	/* In case of MII the PHY is the clock master */
	if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
		return;

	rate = rgmii_clock(speed);
	if (rate < 0)
		return;

	rate_rounded = clk_round_rate(bp->tx_clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(bp->dev,
			    "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(bp->tx_clk, rate_rounded))
		netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
/* Apply the new configuration, if any */ if (old_ctrl ^ ctrl)
macb_or_gem_writel(bp, NCFGR, ctrl);
if (old_ncr ^ ncr)
macb_or_gem_writel(bp, NCR, ncr);
/* Disable AN for SGMII fixed link configuration, enable otherwise. * Must be written after PCSSEL is set in NCFGR, * otherwise writes will not take effect.
*/ if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
u32 pcsctrl, old_pcsctrl;
if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
ctrl &= ~MACB_BIT(PAE); if (macb_is_gem(bp)) {
ctrl &= ~GEM_BIT(GBE);
if (speed == SPEED_1000)
ctrl |= GEM_BIT(GBE);
}
if (rx_pause)
ctrl |= MACB_BIT(PAE);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down * cleared the pipeline and control registers.
*/
bp->macbgem_ops.mog_init_rings(bp);
macb_init_buffers(bp);
/* If we have a child named mdio, probe it instead of looking for PHYs * directly under the MAC node
*/ if (mdio_np) return of_mdiobus_register(bp->mii_bus, mdio_np);
/* Only create the PHY from the device tree if at least one PHY is * described. Otherwise scan the entire MDIO bus. We do this to support * old device tree that did not follow the best practices and did not * describe their network PHYs.
*/
for_each_available_child_of_node(np, child) if (of_mdiobus_child_is_phy(child)) { /* The loop increments the child refcount, * decrement it before returning.
*/
of_node_put(child);
/* With fixed-link, we don't need to register the MDIO bus, * except if we have a child named "mdio" in the device tree. * In that case, some devices may be attached to the MACB's MDIO bus.
*/
mdio_np = of_get_child_by_name(np, "mdio"); if (!mdio_np && of_phy_is_fixed_link(np)) return macb_mii_probe(bp->dev);
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr); /* The low bits of RX address contain the RX_USED bit, clearing * of which allows packet RX. Make sure the high bits are also * visible to HW at that point.
*/
dma_wmb();
} #endif
desc->addr = lower_32_bits(addr);
}
queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls * macb_tx_complete(), which in turn may call netif_wake_subqueue(). * As explained below, we have to halt the transmission before updating * TBQP registers so we call netif_tx_stop_all_queues() to notify the * network engine about the macb/gem being halted.
*/
napi_disable(&queue->napi_tx);
spin_lock_irqsave(&bp->lock, flags);
/* Make sure nobody is trying to queue up new packets */
netif_tx_stop_all_queues(bp->dev);
/* Stop transmission now * (in case we have just queued new packets) * macb/gem must be halted to write TBQP register
*/ if (macb_halt_tx(bp)) {
netdev_err(bp->dev, "BUG: halt tx timed out\n");
macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
halt_timeout = true;
}
/* Treat frames in TX queue including the ones that caused the error. * Free transmit buffers in upper layer.
*/ for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
u32 ctrl;
if (ctrl & MACB_BIT(TX_USED)) { /* skb is set for the last buffer of the frame */ while (!skb) {
macb_tx_unmap(bp, tx_skb, 0);
tail++;
tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
}
/* ctrl still refers to the first buffer descriptor * since it's the only one written back by the hardware
*/ if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
bytes += skb->len;
}
} else { /* "Buffers exhausted mid-frame" errors may only happen * if the driver is buggy, so complain loudly about * those. Statistics are updated by hardware.
*/ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
netdev_err(bp->dev, "BUG: TX buffers exhausted mid-frame\n");
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); /* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
/* No need to parse packet if PTP TS is not involved */ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) goto not_oss;
/* Identify and return whether PTP one step sync is being processed */
ptp_class = ptp_classify_raw(skb); if (ptp_class == PTP_CLASS_NONE) goto not_oss;
hdr = ptp_parse_header(skb, ptp_class); if (!hdr) goto not_oss;
if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP) goto not_oss;
msgtype = ptp_get_msgtype(hdr, ptp_class); if (msgtype == PTP_MSGTYPE_SYNC) returntrue;
/* Make hw descriptor updates visible to CPU */
rmb();
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) { /* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); if (unlikely(!skb)) {
netdev_err(bp->dev, "Unable to allocate sk_buff\n"); break;
}
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
bp->rx_buffer_size,
DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, paddr)) {
dev_kfree_skb(skb); break;
}
queue->rx_skbuff[entry] = skb;
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
desc->ctrl = 0; /* Setting addr clears RX_USED and allows reception, * make sure ctrl is cleared first to avoid a race.
*/
dma_wmb();
macb_set_addr(bp, desc, paddr);
/* Mark DMA descriptors from begin up to and not including end as unused */ staticvoid discard_partial_frame(struct macb_queue *queue, unsignedint begin, unsignedint end)
{ unsignedint frag;
/* The ethernet header starts NET_IP_ALIGN bytes into the * first buffer. Since the header is 14 bytes, this makes the * payload word-aligned. * * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy * the two padding bytes into the skb so that we avoid hitting * the slowpath in memcpy(), and pull them off afterwards.
*/
skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); if (!skb) {
bp->dev->stats.rx_dropped++; for (frag = first_frag; ; frag++) {
desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED); if (frag == last_frag) break;
}
/* Make descriptor updates visible to hardware */
wmb();
return 1;
}
offset = 0;
len += NET_IP_ALIGN;
skb_checksum_none_assert(skb);
skb_put(skb, len);
/* Packet completions only seem to propagate to raise * interrupts when interrupts are enabled at the time, so if * packets were received while interrupts were disabled, * they will not cause another interrupt to be generated when * interrupts are re-enabled. * Check for this case here to avoid losing a wakeup. This can * potentially race with the interrupt handler doing the same * actions if an interrupt is raised just after enabling them, * but this should be harmless.
*/ if (macb_rx_pending(queue)) {
queue_writel(queue, IDR, bp->rx_intr_mask); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
napi_schedule(napi);
}
}
rmb(); // ensure txubr_pending is up to date if (queue->txubr_pending) {
queue->txubr_pending = false;
netdev_vdbg(bp->dev, "poll: tx restart\n");
macb_tx_restart(queue);
}
/* Packet completions only seem to propagate to raise * interrupts when interrupts are enabled at the time, so if * packets were sent while interrupts were disabled, * they will not cause another interrupt to be generated when * interrupts are re-enabled. * Check for this case here to avoid losing a wakeup. This can * potentially race with the interrupt handler doing the same * actions if an interrupt is raised just after enabling them, * but this should be harmless.
*/ if (macb_tx_complete_pending(queue)) {
queue_writel(queue, IDR, MACB_BIT(TCOMP)); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
napi_schedule(napi);
}
}
while (status) { /* close possible race with dev_close */ if (unlikely(!netif_running(dev))) {
queue_writel(queue, IDR, -1); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, -1); break;
}
if (status & bp->rx_intr_mask) { /* There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts * now.
*/
queue_writel(queue, IDR, bp->rx_intr_mask); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&queue->napi_rx)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
__napi_schedule(&queue->napi_rx);
}
}
if (status & (MACB_BIT(TCOMP) |
MACB_BIT(TXUBR))) {
queue_writel(queue, IDR, MACB_BIT(TCOMP)); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TCOMP) |
MACB_BIT(TXUBR));
if (status & MACB_BIT(TXUBR)) {
queue->txubr_pending = true;
wmb(); // ensure softirq can see update
}
if (napi_schedule_prep(&queue->napi_tx)) {
netdev_vdbg(bp->dev, "scheduling TX softirq\n");
__napi_schedule(&queue->napi_tx);
}
}
if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
schedule_work(&queue->tx_error_task);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
break;
}
/* Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY.
*/
/* There is a hardware issue under heavy load where DMA can * stop, this causes endless "used buffer descriptor read" * interrupts but it can be cleared by re-enabling RX. See * the at91rm9200 manual, section 41.3.1 or the Zynq manual * section 16.7.4 for details. RXUBR is only enabled for * these two versions.
*/ if (status & MACB_BIT(RXUBR)) {
ctrl = macb_readl(bp, NCR);
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
wmb();
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RXUBR));
}
if (status & MACB_BIT(ISR_ROVR)) { /* We missed at least one packet */
spin_lock(&bp->stats_lock); if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++; else
bp->hw_stats.macb.rx_overruns++;
spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
}
if (status & MACB_BIT(HRESP)) {
queue_work(system_bh_wq, &bp->hresp_err_bh_work);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(HRESP));
}
status = queue_readl(queue, ISR);
}
spin_unlock(&bp->lock);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER /* Polling receive - used by netconsole and other diagnostic tools * to allow network i/o with interrupts disabled.
*/ staticvoid macb_poll_controller(struct net_device *dev)
{ struct macb *bp = netdev_priv(dev); struct macb_queue *queue; unsignedlong flags; unsignedint q;
/* Save info to properly release resources */
tx_skb->skb = NULL;
tx_skb->mapping = mapping;
tx_skb->size = size;
tx_skb->mapped_as_page = true;
len -= size;
offset += size;
count++;
tx_head++;
}
}
/* Should never happen */ if (unlikely(!tx_skb)) {
netdev_err(bp->dev, "BUG! empty skb!\n"); return 0;
}
/* This is the last buffer of the frame: save socket buffer */
tx_skb->skb = skb;
/* Update TX ring: update buffer descriptors in reverse order * to avoid race condition
*/
/* Set 'TX_USED' bit in buffer descriptor at tx_head position * to set the end of TX queue
*/
i = tx_head;
entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
desc = macb_tx_desc(queue, entry);
desc->ctrl = ctrl;
if (lso_ctrl) { if (lso_ctrl == MACB_LSO_UFO_ENABLE) /* include header and FCS in value given to h/w */
mss_mfs = skb_shinfo(skb)->gso_size +
skb_transport_offset(skb) +
ETH_FCS_LEN; else/* TSO */ {
mss_mfs = skb_shinfo(skb)->gso_size; /* TCP Sequence Number Source Select * can be set only for TSO
*/
seq_ctrl = 0;
}
}
/* First descriptor is header descriptor */ if (i == queue->tx_head) {
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); if ((bp->dev->features & NETIF_F_HW_CSUM) &&
skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
!ptp_one_step_sync(skb))
ctrl |= MACB_BIT(TX_NOCRC);
} else /* Only set MSS/MFS on payload descriptors * (second or later descriptor)
*/
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
/* Set TX buffer descriptor */
macb_set_addr(bp, desc, tx_skb->mapping); /* desc->addr must be visible to hardware before clearing * 'TX_USED' bit in desc->ctrl.
*/
wmb();
desc->ctrl = ctrl;
} while (i != queue->tx_head);
/* there is only one buffer or protocol is not UDP */ if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) return features;
/* length of header */
hdrlen = skb_transport_offset(skb);
/* For UFO only: * When software supplies two or more payload buffers all payload buffers * apart from the last must be a multiple of 8 bytes in size.
*/ if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) return features & ~MACB_NETIF_LSO;
nr_frags = skb_shinfo(skb)->nr_frags; /* No need to check last fragment */
nr_frags--; for (f = 0; f < nr_frags; f++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) return features & ~MACB_NETIF_LSO;
} return features;
}
/* Zero the checksum field of an skb that requests checksum offload.
 * Returns 0 on success (or when no offload is requested), -1 if the
 * header could not be made writable.
 */
static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}
if (padlen <= 0) { /* FCS could be appeded to tailroom. */ if (tailroom >= ETH_FCS_LEN) goto add_fcs; /* No room for FCS, need to reallocate skb. */ else
padlen = ETH_FCS_LEN;
} else { /* Add room for FCS. */
padlen += ETH_FCS_LEN;
}
if (cloned || tailroom < padlen) {
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); if (!nskb) return -ENOMEM;
dev_consume_skb_any(*skb);
*skb = nskb;
}
if (padlen > ETH_FCS_LEN)
skb_put_zero(*skb, padlen - ETH_FCS_LEN);
add_fcs: /* set FCS to packet */
fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
fcs = ~fcs;
if (is_lso) { /* length of headers */ if (ip_hdr(skb)->protocol == IPPROTO_UDP) /* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb); else
hdrlen = skb_tcp_all_headers(skb); if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); /* if this is required, would need to copy to single buffer */ return NETDEV_TX_BUSY;
}
} else
hdrlen = min(skb_headlen(skb), bp->max_tx_length);
#ifdefined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev, "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
queue_index, skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true); #endif
/* Count how many TX buffer descriptors are needed to send this * socket buffer: skb fragments of jumbo frames may need to be * split into many buffer descriptors.
*/ if (is_lso && (skb_headlen(skb) > hdrlen)) /* extra header descriptor if also payload in first buffer */
desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; else
desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) {
frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
/* This is a hard error, log it. */ if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) < desc_cnt) {
netif_stop_subqueue(dev, queue_index);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
queue->tx_head, queue->tx_tail);
ret = NETDEV_TX_BUSY; goto unlock;
}
/* Map socket buffer for DMA transfer */ if (!macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb); goto unlock;
}
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
skb->len);
/*
 * NOTE(review): the German disclaimer text below is an extraction artifact
 * unrelated to this driver; it is preserved inside a comment so the file
 * remains valid C. It should likely be removed entirely.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */