/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
 */
#define METH_MCF_LIMIT 32
/* * This structure is private to each device. It is used to pass * packets in and out, so there is place for a packet
*/ struct meth_private { struct platform_device *pdev;
/* in-memory copy of MAC Control register */
u64 mac_ctrl;
/* in-memory copy of DMA Control register */ unsignedlong dma_ctrl; /* address of PHY, used by mdio_* functions, initialized in mdio_probe */ unsignedlong phy_addr;
tx_packet *tx_ring;
dma_addr_t tx_ring_dma; struct sk_buff *tx_skbs[TX_RING_ENTRIES];
dma_addr_t tx_skb_dmas[TX_RING_ENTRIES]; unsignedlong tx_read, tx_write, tx_count;
/*
 * Allocate the coherent TX descriptor ring, reset the ring indices and
 * clear the per-slot skb save area, then point the MAC at the ring.
 * Returns 0 on success or -ENOMEM if the DMA allocation fails.
 */
static int meth_init_tx_ring(struct meth_private *priv)
{
	/* Init TX ring */
	priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev,
			TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_ATOMIC);
	if (!priv->tx_ring)
		return -ENOMEM;

	priv->tx_count = priv->tx_read = priv->tx_write = 0;
	/* hand the ring's bus address to the MAC */
	mace->eth.tx_ring_base = priv->tx_ring_dma;
	/* Now init skb save area */
	memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
	memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
	return 0;
}
staticint meth_init_rx_ring(struct meth_private *priv)
{ int i;
for (i = 0; i < RX_RING_ENTRIES; i++) {
priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0); /* 8byte status vector + 3quad padding + 2byte padding,
* to put data on 64bit aligned boundary */
skb_reserve(priv->rx_skbs[i],METH_RX_HEAD);
priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); /* I'll need to re-sync it after each RX */
priv->rx_ring_dmas[i] =
dma_map_single(&priv->pdev->dev, priv->rx_ring[i],
METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
mace->eth.rx_fifo = priv->rx_ring_dmas[i];
}
priv->rx_write = 0; return 0;
} staticvoid meth_free_tx_ring(struct meth_private *priv)
{
	int slot;

	/* Drop any skbs still queued for (or awaiting) transmission;
	 * dev_kfree_skb() tolerates NULL entries. */
	for (slot = 0; slot < TX_RING_ENTRIES; slot++) {
		dev_kfree_skb(priv->tx_skbs[slot]);
		priv->tx_skbs[slot] = NULL;
	}

	/* Release the coherent descriptor ring itself. */
	dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
			  priv->tx_ring_dma);
}
/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */ staticvoid meth_free_rx_ring(struct meth_private *priv)
{ int i;
for (i = 0; i < RX_RING_ENTRIES; i++) {
dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i],
METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
priv->rx_ring[i] = 0;
priv->rx_ring_dmas[i] = 0;
kfree_skb(priv->rx_skbs[i]);
}
}
int meth_reset(struct net_device *dev)
{ struct meth_private *priv = netdev_priv(dev);
/* * Open and close
*/ staticint meth_open(struct net_device *dev)
{ struct meth_private *priv = netdev_priv(dev); int ret;
priv->phy_addr = -1; /* No PHY is known yet... */
/* Initialize the hardware */
ret = meth_reset(dev); if (ret < 0) return ret;
/* Allocate the ring buffers */
ret = meth_init_tx_ring(priv); if (ret < 0) return ret;
ret = meth_init_rx_ring(priv); if (ret < 0) goto out_free_tx_ring;
ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev); if (ret) {
printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); goto out_free_rx_ring;
}
printk(KERN_WARNING "meth: error status: 0x%08x\n",status); /* check for errors too... */ if (status & (METH_INT_TX_LINK_FAIL))
printk(KERN_WARNING "meth: link failure\n"); /* Should I do full reset in this case? */ if (status & (METH_INT_MEM_ERROR))
printk(KERN_WARNING "meth: memory error\n"); if (status & (METH_INT_TX_ABORT))
printk(KERN_WARNING "meth: aborted\n"); if (status & (METH_INT_RX_OVERFLOW))
printk(KERN_WARNING "meth: Rx overflow\n"); if (status & (METH_INT_RX_UNDERFLOW)) {
printk(KERN_WARNING "meth: Rx underflow\n");
spin_lock_irqsave(&priv->meth_lock, flags);
mace->eth.int_stat = METH_INT_RX_UNDERFLOW; /* more underflow interrupts will be delivered, * effectively throwing us into an infinite loop.
* Thus I stop processing Rx in this case. */
priv->dma_ctrl &= ~METH_DMA_RX_EN;
mace->eth.dma_ctrl = priv->dma_ctrl;
DPRINTK("Disabled meth Rx DMA temporarily\n");
spin_unlock_irqrestore(&priv->meth_lock, flags);
}
mace->eth.int_stat = METH_INT_ERROR;
}
status = mace->eth.int_stat; while (status & 0xff) { /* First handle errors - if we get Rx underflow, * Rx DMA will be disabled, and Rx handler will reenable * it. I don't think it's possible to get Rx underflow,
* without getting Rx interrupt */ if (status & METH_INT_ERROR) {
meth_error(dev, status);
} if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) { /* a transmission is over: free the skb */
meth_tx_cleanup(dev, status);
} if (status & METH_INT_RX_THRESHOLD) { if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN)) break; /* send it to meth_rx for handling */
meth_rx(dev, status);
}
status = mace->eth.int_stat;
}
return IRQ_HANDLED;
}
/* * Transmits packets that fit into TX descriptor (are <=120B)
*/ staticvoid meth_tx_short_prepare(struct meth_private *priv, struct sk_buff *skb)
{
tx_packet *desc = &priv->tx_ring[priv->tx_write]; int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); /* maybe I should set whole thing to 0 first... */
skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); if (skb->len < len)
memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
} #define TX_CATBUF1 BIT(25) staticvoid meth_tx_1page_prepare(struct meth_private *priv, struct sk_buff *skb)
{
tx_packet *desc = &priv->tx_ring[priv->tx_write]; void *buffer_data = (void *)(((unsignedlong)skb->data + 7) & ~7); int unaligned_len = (int)((unsignedlong)buffer_data - (unsignedlong)skb->data); int buffer_len = skb->len - unaligned_len;
dma_addr_t catbuf;
meth_add_to_tx_ring(priv, skb);
netif_trans_update(dev); /* save the timestamp */
/* If TX ring is full, tell the upper layer to stop sending packets */ if (meth_tx_full(dev)) {
printk(KERN_DEBUG "TX full: stopping\n");
netif_stop_queue(dev);
}
/*
 * Extraction residue — not driver source code.  The original chunk ended
 * with a German website disclaimer, preserved here in translation:
 * "The information on this website was carefully compiled to the best of
 *  our knowledge.  However, neither completeness, nor correctness, nor
 *  quality of the information provided is guaranteed.
 *  Note: the colored syntax display and the measurement are still
 *  experimental."
 */