// SPDX-License-Identifier: GPL-2.0-only /* * Network device driver for the MACE ethernet controller on * Apple Powermacs. Assumes it's under a DBDMA controller. * * Copyright (C) 1996 Paul Mackerras.
*/
/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80
struct mace_data { volatilestruct mace __iomem *mace; volatilestruct dbdma_regs __iomem *tx_dma; int tx_dma_intr; volatilestruct dbdma_regs __iomem *rx_dma; int rx_dma_intr; volatilestruct dbdma_cmd *tx_cmds; /* xmit dma command list */ volatilestruct dbdma_cmd *rx_cmds; /* recv dma command list */ struct sk_buff *rx_bufs[N_RX_RING]; int rx_fill; int rx_empty; struct sk_buff *tx_bufs[N_TX_RING]; int tx_fill; int tx_empty; unsignedchar maccc; unsignedchar tx_fullup; unsignedchar tx_active; unsignedchar tx_bad_runt; struct timer_list tx_timeout; int timeout_active; int port_aaui; int chipid; struct macio_dev *mdev;
spinlock_t lock;
};
/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n",
mace); return -ENODEV;
}
addr = of_get_property(mace, "mac-address", NULL); if (addr == NULL) {
addr = of_get_property(mace, "local-mac-address", NULL); if (addr == NULL) {
printk(KERN_ERR "Can't get mac-address for MACE %pOF\n",
mace); return -ENODEV;
}
}
/* * lazy allocate the driver-wide dummy buffer. (Note that we * never have more than one MACE in the system anyway)
*/ if (dummy_buf == NULL) {
dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL); if (dummy_buf == NULL) return -ENOMEM;
}
/* * Yes this looks peculiar, but apparently it needs to be this * way on some machines.
*/ for (i = 200; i > 0; --i) if (le32_to_cpu(dma->control) & RUN)
udelay(1);
}
/*
 * Free every sk_buff currently held by the rx and tx rings.
 *
 * Called on the reset/close paths, when the DMA engines no longer
 * reference these buffers.
 */
static inline void mace_clean_rings(struct mace_data *mp)
{
	int i;

	/* Free all receive buffers; clear the slots so a later
	 * re-initialisation starts from a known-empty ring. */
	for (i = 0; i < N_RX_RING; ++i) {
		if (mp->rx_bufs[i] != NULL) {
			dev_kfree_skb(mp->rx_bufs[i]);
			mp->rx_bufs[i] = NULL;
		}
	}
	/* Free any transmit buffers still queued between tx_empty
	 * (oldest unreclaimed slot) and tx_fill (next free slot). */
	for (i = mp->tx_empty; i != mp->tx_fill; ) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
	}
}
/* initialize list of sk_buffs for receiving and set up recv dma */
mace_clean_rings(mp);
memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
cp = mp->rx_cmds; for (i = 0; i < N_RX_RING - 1; ++i) {
skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); if (!skb) {
data = dummy_buf;
} else {
skb_reserve(skb, 2); /* so IP header lands on 4-byte bdry */
data = skb->data;
}
mp->rx_bufs[i] = skb;
cp->req_count = cpu_to_le16(RX_BUFLEN);
cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
cp->phy_addr = cpu_to_le32(virt_to_bus(data));
cp->xfer_status = 0;
++cp;
}
mp->rx_bufs[i] = NULL;
cp->command = cpu_to_le16(DBDMA_STOP);
mp->rx_fill = i;
mp->rx_empty = 0;
/* Put a branch back to the beginning of the receive command list */
++cp;
cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));
/* put a branch at the end of the tx command list */
cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));
/* see if there's a free slot in the tx ring */
spin_lock_irqsave(&mp->lock, flags);
fill = mp->tx_fill;
next = fill + 1; if (next >= N_TX_RING)
next = 0; if (next == mp->tx_empty) {
netif_stop_queue(dev);
mp->tx_fullup = 1;
spin_unlock_irqrestore(&mp->lock, flags); return NETDEV_TX_BUSY; /* can't take it at the moment */
}
spin_unlock_irqrestore(&mp->lock, flags);
/* partially fill in the dma command block */
len = skb->len; if (len > ETH_FRAME_LEN) {
printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
len = ETH_FRAME_LEN;
}
mp->tx_bufs[fill] = skb;
cp = mp->tx_cmds + NCMDS_TX * fill;
cp->req_count = cpu_to_le16(len);
cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));
i = mp->tx_empty; while (in_8(&mb->pr) & XMTSV) {
timer_delete(&mp->tx_timeout);
mp->timeout_active = 0; /* * Clear any interrupt indication associated with this status * word. This appears to unlatch any error indication from * the DMA controller.
*/
intr = in_8(&mb->ir); if (intr != 0)
mace_handle_misc_intrs(mp, intr, dev); if (mp->tx_bad_runt) {
fs = in_8(&mb->xmtfs);
mp->tx_bad_runt = 0;
out_8(&mb->xmtfc, AUTO_PAD_XMIT); continue;
}
dstat = le32_to_cpu(td->status); /* stop DMA controller */
out_le32(&td->control, RUN << 16); /* * xcount is the number of complete frames which have been * written to the fifo but for which status has not been read.
*/
xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK; if (xcount == 0 || (dstat & DEAD)) { /* * If a packet was aborted before the DMA controller has * finished transferring it, it seems that there are 2 bytes * which are stuck in some buffer somewhere. These will get * transmitted as soon as we read the frame status (which * reenables the transmit data transfer request). Turning * off the DMA controller and/or resetting the MACE doesn't * help. So we disable auto-padding and FCS transmission * so the two bytes will only be a runt packet which should * be ignored by other stations.
*/
out_8(&mb->xmtfc, DXMTFCS);
}
fs = in_8(&mb->xmtfs); if ((fs & XMTSV) == 0) {
printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
fs, xcount, dstat);
mace_reset(dev); /* * XXX mace likes to hang the machine after a xmtfs error. * This is hard to reproduce, resetting *may* help
*/
}
cp = mp->tx_cmds + NCMDS_TX * i;
stat = le16_to_cpu(cp->xfer_status); if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) { /* * Check whether there were in fact 2 bytes written to * the transmit FIFO.
*/
udelay(1);
x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK; if (x != 0) { /* there were two bytes with an end-of-packet indication */
mp->tx_bad_runt = 1;
mace_set_timeout(dev);
} else { /* * Either there weren't the two bytes buffered up, or they * didn't have an end-of-packet indication. * We flush the transmit FIFO just in case (by setting the * XMTFWU bit with the transmitter disabled).
*/
out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
udelay(1);
out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
out_8(&mb->xmtfc, AUTO_PAD_XMIT);
}
} /* dma should have finished */ if (i == mp->tx_fill) {
printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
fs, xcount, dstat); continue;
} /* Update stats */ if (fs & (UFLO|LCOL|LCAR|RTRY)) {
++dev->stats.tx_errors; if (fs & LCAR)
++dev->stats.tx_carrier_errors; if (fs & (UFLO|LCOL|RTRY))
++dev->stats.tx_aborted_errors;
} else {
dev->stats.tx_bytes += mp->tx_bufs[i]->len;
++dev->stats.tx_packets;
}
dev_consume_skb_irq(mp->tx_bufs[i]);
--mp->tx_active; if (++i >= N_TX_RING)
i = 0; #if 0
mace_last_fs = fs;
mace_last_xcount = xcount; #endif
}
if (i != mp->tx_empty) {
mp->tx_fullup = 0;
netif_wake_queue(dev);
}
mp->tx_empty = i;
i += mp->tx_active; if (i >= N_TX_RING)
i -= N_TX_RING; if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) { do { /* set up the next one */
cp = mp->tx_cmds + NCMDS_TX * i;
out_le16(&cp->xfer_status, 0);
out_le16(&cp->command, OUTPUT_LAST);
++mp->tx_active; if (++i >= N_TX_RING)
i = 0;
} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
mace_set_timeout(dev);
}
spin_unlock_irqrestore(&mp->lock, flags); return IRQ_HANDLED;
}
/* update various counters */
mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);
cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
/* turn off both tx and rx and reset the chip */
out_8(&mb->maccc, 0);
printk(KERN_ERR "mace: transmit timeout - resetting\n");
dbdma_reset(td);
mace_reset(dev);
spin_lock_irqsave(&mp->lock, flags); for (i = mp->rx_empty; i != mp->rx_fill; ) {
cp = mp->rx_cmds + i;
stat = le16_to_cpu(cp->xfer_status); if ((stat & ACTIVE) == 0) {
next = i + 1; if (next >= N_RX_RING)
next = 0;
np = mp->rx_cmds + next; if (next != mp->rx_fill &&
(le16_to_cpu(np->xfer_status) & ACTIVE) != 0) {
printk(KERN_DEBUG "mace: lost a status word\n");
++mace_lost_status;
} else break;
}
nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count);
out_le16(&cp->command, DBDMA_STOP); /* got a packet, have a look at it */
skb = mp->rx_bufs[i]; if (!skb) {
++dev->stats.rx_dropped;
} elseif (nb > 8) {
data = skb->data;
frame_status = (data[nb-3] << 8) + data[nb-4]; if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
++dev->stats.rx_errors; if (frame_status & RS_OFLO)
++dev->stats.rx_over_errors; if (frame_status & RS_FRAMERR)
++dev->stats.rx_frame_errors; if (frame_status & RS_FCSERR)
++dev->stats.rx_crc_errors;
} else { /* Mace feature AUTO_STRIP_RCV is on by default, dropping the * FCS on frames with 802.3 headers. This means that Ethernet * frames have 8 extra octets at the end, while 802.3 frames
* have only 4. We need to correctly account for this. */ if (*(unsignedshort *)(data+12) < 1536) /* 802.3 header */
nb -= 4; else/* Ethernet header; mace includes FCS */
nb -= 8;
skb_put(skb, nb);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_bytes += skb->len;
netif_rx(skb);
mp->rx_bufs[i] = NULL;
++dev->stats.rx_packets;
}
} else {
++dev->stats.rx_errors;
++dev->stats.rx_length_errors;
}
/* advance to next */ if (++i >= N_RX_RING)
i = 0;
}
mp->rx_empty = i;
i = mp->rx_fill; for (;;) {
next = i + 1; if (next >= N_RX_RING)
next = 0; if (next == mp->rx_empty) break;
cp = mp->rx_cmds + i;
skb = mp->rx_bufs[i]; if (!skb) {
skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); if (skb) {
skb_reserve(skb, 2);
mp->rx_bufs[i] = skb;
}
}
cp->req_count = cpu_to_le16(RX_BUFLEN);
data = skb? skb->data: dummy_buf;
cp->phy_addr = cpu_to_le32(virt_to_bus(data));
out_le16(&cp->xfer_status, 0);
out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); #if 0 if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
out_le32(&rd->control, (PAUSE << 16) | PAUSE); while ((in_le32(&rd->status) & ACTIVE) != 0)
;
} #endif
i = next;
} if (i != mp->rx_fill) {
out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
mp->rx_fill = i;
}
spin_unlock_irqrestore(&mp->lock, flags); return IRQ_HANDLED;
}
/*
 * (Stray web-page boilerplate, translated from German; not part of the
 * driver source:)
 * The information on this web page has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental.
 */