// SPDX-License-Identifier: GPL-2.0
/* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
 *
 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
 *
 * References:
 *  o IOC3 ASIC specification 4.51, 1996-04-18
 *  o IEEE 802.3 specification, 2000 edition
 *  o DP38840A Specification, National Semiconductor, March 1997
 *
 * To do:
 *
 *  o Use prefetching for large packets.  What is a good lower limit for
 *    prefetching?
 *  o Use hardware checksums.
 *  o Which PHYs might possibly be attached to the IOC3 in real life,
 *    which workarounds are required for them?  Do we ever have Lucent's?
 *  o For the 2.5 branch kill the mii-tool ioctls.
 */
/* Number of RX buffers. This is tunable in the range of 16 <= x < 512. * The value must be a power of two.
*/ #define RX_BUFFS 64 #define RX_RING_ENTRIES 512 /* fixed in hardware */ #define RX_RING_MASK (RX_RING_ENTRIES - 1) #define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64))
/* ensure buffer is aligned to IOC3_DMA_XFER_LEN */
offset = aligned_rx_skb_addr((unsignedlong)new_skb->data); if (offset)
skb_reserve(new_skb, offset);
d = dma_map_single(ip->dma_dev, new_skb->data,
RX_BUF_SIZE, DMA_FROM_DEVICE);
if (memcmp(prefix, name, prefix_len) != 0) return 0;
/* found nvmem device which is attached to our ioc3 * now check for one wire family code 09, 89 and 91
*/ if (memcmp(name + prefix_len, "09-", 3) == 0) return 1; if (memcmp(name + prefix_len, "89-", 3) == 0) return 1; if (memcmp(name + prefix_len, "91-", 3) == 0) return 1;
return 0;
}
staticint ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6])
{ struct nvmem_device *nvmem; char prefix[24];
u8 prom[16]; int ret; int i;
/* Caller must hold the ioc3_lock ever for MII readers. This is also * used to protect the transmitter side but it's low contention.
*/ staticint ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{ struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs;
while (readl(®s->micr) & MICR_BUSY)
;
writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG,
®s->micr); while (readl(®s->micr) & MICR_BUSY)
;
return readl(®s->midr_r) & MIDR_DATA_MASK;
}
staticvoid ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{ struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs;
while (readl(®s->micr) & MICR_BUSY)
;
writel(data, ®s->midr_w);
writel((phy << MICR_PHYADDR_SHIFT) | reg, ®s->micr); while (readl(®s->micr) & MICR_BUSY)
;
}
/* Did hardware handle the checksum at all? The cases we can handle * are: * * - TCP and UDP checksums of IPv4 only. * - IPv6 would be doable but we keep that for later ... * - Only unfragmented packets. Did somebody already tell you * fragmentation is evil? * - don't care about packet size. Worst case when processing a * malformed packet we'll try to access the packet at ip header + * 64 bytes which is still inside the skb. Even in the unlikely * case where the checksum is right the higher layers will still * drop the packet as appropriate.
*/ if (eh->h_proto != htons(ETH_P_IP)) return;
ih = (struct iphdr *)((char *)eh + ETH_HLEN); if (ip_is_fragment(ih)) return;
proto = ih->protocol; if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) return;
if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) { /* Ouch, drop packet and just recycle packet * to keep the ring filled.
*/
dev->stats.rx_dropped++;
new_skb = skb;
d = rxr[rx_entry]; goto next;
}
if (likely(dev->features & NETIF_F_RXCSUM))
ioc3_tcpudp_checksum(skb,
w0 & ERXBUF_IPCKSUM_MASK,
len);
dev->stats.rx_packets++; /* Statistics */
dev->stats.rx_bytes += len;
} else { /* The frame is invalid and the skb never * reached the network layer so we can just * recycle it.
*/
new_skb = skb;
d = rxr[rx_entry];
dev->stats.rx_errors++;
} if (err & ERXBUF_CRCERR) /* Statistics */
dev->stats.rx_crc_errors++; if (err & ERXBUF_FRAMERR)
dev->stats.rx_frame_errors++;
/* Deal with fatal IOC3 errors. This condition might be caused by a hard or * software problems, so we should try to recover * more gracefully if this ever happens. In theory we might be flooded * with such error interrupts if something really goes wrong, so we might * also consider to take the interface down.
*/ staticvoid ioc3_error(struct net_device *dev, u32 eisr)
{ struct ioc3_private *ip = netdev_priv(dev);
spin_lock(&ip->ioc3_lock);
if (eisr & EISR_RXOFLO)
net_err_ratelimited("%s: RX overflow.\n", dev->name); if (eisr & EISR_RXBUFOFLO)
net_err_ratelimited("%s: RX buffer overflow.\n", dev->name); if (eisr & EISR_RXMEMERR)
net_err_ratelimited("%s: RX PCI error.\n", dev->name); if (eisr & EISR_RXPARERR)
net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name); if (eisr & EISR_TXBUFUFLO)
net_err_ratelimited("%s: TX buffer underflow.\n", dev->name); if (eisr & EISR_TXMEMERR)
net_err_ratelimited("%s: TX PCI error.\n", dev->name);
/* The interrupt handler does all of the Rx thread work and cleans up * after the Tx thread.
*/ static irqreturn_t ioc3_interrupt(int irq, void *dev_id)
{ struct ioc3_private *ip = netdev_priv(dev_id); struct ioc3_ethregs *regs = ip->regs;
u32 eisr;
/* Try to find a PHY. There is no apparent relation between the MII addresses * in the SGI documentation and what we find in reality, so we simply probe * for the PHY.
*/ staticint ioc3_mii_init(struct ioc3_private *ip)
{
u16 word; int i;
for (i = 0; i < 32; i++) {
word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
/* Now the rx buffers. The RX ring may be larger but * we only allocate 16 buffers for now. Need to tune * this for performance and memory later.
*/ for (i = 0; i < RX_BUFFS; i++) { if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d)) return -ENOMEM;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) {
dev_err(&pdev->dev, "Invalid resource\n"); return -EINVAL;
} /* get mac addr from one wire prom */ if (ioc3eth_get_mac_addr(regs, mac_addr)) return -EPROBE_DEFER; /* not available yet */
dev = alloc_etherdev(sizeof(struct ioc3_private)); if (!dev) return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
ip = netdev_priv(dev);
ip->dma_dev = pdev->dev.parent;
ip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ip->regs)) {
err = PTR_ERR(ip->regs); goto out_free;
}
/* IOC3 has a fairly simple minded checksumming hardware which simply * adds up the 1's complement checksum for the entire packet and * inserts it at an offset which can be specified in the descriptor * into the transmit packet. This means we have to compensate for the * MAC header which should not be summed and the TCP/UDP pseudo headers * manually.
*/ if (skb->ip_summed == CHECKSUM_PARTIAL) { conststruct iphdr *ih = ip_hdr(skb); constint proto = ntohs(ih->protocol); unsignedint csoff;
u32 csum, ehsum;
u16 *eh;
/* The MAC header. skb->mac seem the logic approach * to find the MAC header - except it's a NULL pointer ...
*/
eh = (u16 *)skb->data;
/* Sum up dest addr, src addr and protocol */
ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
/* Skip IP header; it's sum is always zero and was * already filled in by ip_output.c
*/
csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
ih->tot_len - (ih->ihl << 2),
proto, csum_fold(ehsum));
/* Given a multicast ethernet address, this routine calculates the * address's bit index in the logical address filter mask
*/ staticinlineunsignedint ioc3_hash(constunsignedchar *addr)
{ unsignedint temp = 0; int bits;
u32 crc;
crc = ether_crc_le(ETH_ALEN, addr);
crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */ for (bits = 6; --bits >= 0; ) {
temp <<= 1;
temp |= (crc & 0x1);
crc >>= 1;
}
if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > 64)) { /* Too many for hashing to make sense or we want all * multicast packets anyway, so skip computing all the * hashes and just accept all packets.
*/
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
netdev_for_each_mc_addr(ha, dev) {
ehar |= (1UL << ioc3_hash(ha->addr));
}
ip->ehar_h = ehar >> 32;
ip->ehar_l = ehar & 0xffffffff;
}
writel(ip->ehar_h, ®s->ehar_h);
writel(ip->ehar_l, ®s->ehar_l);
}
/* NOTE(review): the following trailing text is German website boilerplate that
 * leaked into this file during extraction and is not part of the driver source.
 * Translated: "The information on this website has been carefully compiled to
 * the best of our knowledge.  However, neither completeness, correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still experimental."
 */