/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ /* * Copyright 1996-1999 Thomas Bogendoerfer * * Derived from the lance driver written 1993,1994,1995 by Donald Becker. * * Copyright 1993 United States Government as represented by the * Director, National Security Agency. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * This driver is for PCnet32 and PCnetPCI based ethercards
*/ /************************************************************************** * 23 Oct, 2000. * Fixed a few bugs, related to running the controller in 32bit mode. * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. *
*************************************************************************/
/* * PCI device identifiers for "new style" Linux PCI Device Drivers
*/ staticconststruct pci_device_id pcnet32_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
/* * Adapters that were sold with IBM's RS/6000 or pSeries hardware have * the incorrect vendor id.
*/
{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
.class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
#define MAX_UNITS 8 /* More are supported, limit only on options */ staticint options[MAX_UNITS]; staticint full_duplex[MAX_UNITS]; staticint homepna[MAX_UNITS];
/*
 * Theory of Operation
 *
 * This driver uses the same software structure as the normal lance
 * driver. So look for a verbose description in lance.c. The differences
 * to the normal lance driver is the use of the 32bit mode of PCnet32
 * and PCnetPCI chips. Because these chips are 32bit chips, there is no
 * 16MB limitation and we don't need bounce buffers.
 */

/*
 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
 * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
 * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
 */
#ifndef PCNET32_LOG_TX_BUFFERS
#define PCNET32_LOG_TX_BUFFERS 4	/* default: 2^4 == 16 Tx descriptors */
#define PCNET32_LOG_RX_BUFFERS 5	/* default: 2^5 == 32 Rx descriptors */
#define PCNET32_LOG_MAX_TX_BUFFERS 9	/* 2^9 == 512 */
#define PCNET32_LOG_MAX_RX_BUFFERS 9
#endif
/* The PCNET32 32-Bit initialization block, described in databook.
 * The chip DMAs this structure at init time, so every multi-byte field
 * is stored little-endian (__le16/__le32) regardless of host endianness.
 */
struct pcnet32_init_block {
	__le16 mode;		/* operating mode; 0x0003 disables Rx and Tx */
	__le16 tlen_rlen;	/* encoded ring lengths (tx_len_bits | rx_len_bits) */
	u8 phys_addr[6];	/* station MAC address, copied from dev->dev_addr */
	__le16 reserved;
	__le32 filter[2];	/* logical address filter — presumably the multicast
				 * hash (TODO confirm vs databook); zeroed at probe */
	/* Receive and transmit ring base, along with extra bits. */
	__le32 rx_ring;		/* DMA address of the Rx descriptor ring */
	__le32 tx_ring;		/* DMA address of the Tx descriptor ring */
};
/*
 * The first field of pcnet32_private is read by the ethernet device
 * so the structure should be allocated using dma_alloc_coherent().
 */
struct pcnet32_private {
	struct pcnet32_init_block *init_block;	/* chip-visible init block (coherent DMA) */
	/* The Tx and Rx ring entries must be aligned on 16-byte boundaries
	 * in 32bit mode. */
	struct pcnet32_rx_head	*rx_ring;
	struct pcnet32_tx_head	*tx_ring;
	dma_addr_t		init_dma_addr;	/* DMA address of beginning of the init block,
						   returned by dma_alloc_coherent */
	struct pci_dev		*pci_dev;	/* NULL for VLB cards (see pcnet32_probe1) */
	const char		*name;		/* chip name string chosen at probe */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff		**tx_skbuff;
	struct sk_buff		**rx_skbuff;
	dma_addr_t		*tx_dma_addr;	/* per-descriptor streaming DMA mappings */
	dma_addr_t		*rx_dma_addr;
	const struct pcnet32_access *a;		/* WIO or DWIO register accessors */
	spinlock_t		lock;		/* Guard lock */
	unsigned int		cur_rx, cur_tx;	/* The next free ring entry */
	unsigned int		rx_ring_size;	/* current rx ring size */
	unsigned int		tx_ring_size;	/* current tx ring size */
	unsigned int		rx_mod_mask;	/* rx ring modular mask */
	unsigned int		tx_mod_mask;	/* tx ring modular mask */
	unsigned short		rx_len_bits;	/* log2 ring size, shifted for init block */
	unsigned short		tx_len_bits;
	dma_addr_t		rx_ring_dma_addr;
	dma_addr_t		tx_ring_dma_addr;
	unsigned int		dirty_rx,	/* ring entries to be freed. */
				dirty_tx;

	struct net_device	*dev;
	struct napi_struct	napi;
	char			tx_full;
	char			phycount;	/* number of phys found */
	int			options;	/* PCNET32_PORT_* selection bits */
	unsigned int		shared_irq:1,	/* shared irq possible */
				dxsuflo:1,	/* disable transmit stop on uflo */
				mii:1,		/* mii port available */
				autoneg:1,	/* autoneg enabled */
				port_tp:1,	/* port set to TP */
				fdx:1;		/* full duplex enabled */
	struct net_device	*next;
	struct mii_if_info	mii_if;
	struct timer_list	watchdog_timer;
	u32			msg_enable;	/* debug message level */

	/* each bit indicates an available PHY */
	u32			phymask;
	unsigned short		chip_version;	/* which variant this is */

	/* saved registers during ethtool blink */
	u16			save_regs[4];
};
	/*
	 * NOTE(review): fragment — the enclosing function's header is not
	 * visible in this chunk of the file.
	 */
	netif_wake_queue(dev);

	/* Keep only the low byte of CSR3 — same "clear interrupt masks"
	 * sequence used in the NAPI poll completion path below. */
	val = lp->a->read_csr(ioaddr, CSR3);
	val &= 0x00ff;
	lp->a->write_csr(ioaddr, CSR3, val);

	napi_enable_locked(&lp->napi);
}
/*
 * Allocate space for the new sized tx ring.
 * Free old resources
 * Save new resources.
 * Any failure keeps old resources.
 * Must be called with lp->lock held.
 */
static void pcnet32_realloc_tx_ring(struct net_device *dev,
				    struct pcnet32_private *lp,
				    unsigned int size)
{
	dma_addr_t new_ring_dma_addr;
	dma_addr_t *new_dma_addr_list;
	struct pcnet32_tx_head *new_tx_ring;
	struct sk_buff **new_skb_list;
	unsigned int entries = BIT(size);	/* size is log2(#descriptors) */
	/*
	 * NOTE(review): the remainder of this function's body is missing from
	 * this chunk; the text resumes inside an unrelated function.
	 */
	/*
	 * NOTE(review): fragment — body of the link-state query (MII vs.
	 * 79C970A handling); the function header is not visible here.
	 */
	spin_lock_irqsave(&lp->lock, flags);
	if (lp->mii) {
		r = mii_link_ok(&lp->mii_if);
	} else if (lp->chip_version == PCNET32_79C970A) {
		ulong ioaddr = dev->base_addr;	/* card base I/O address */
		/* only read link if port is set to TP */
		if (!lp->autoneg && lp->port_tp)
			r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
		else	/* link always up for AUI port or port auto select */
			r = 1;
	} else if (lp->chip_version > PCNET32_79C970A) {
		ulong ioaddr = dev->base_addr;	/* card base I/O address */
		r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
	} else {	/* can not detect link on really old chips */
		r = 1;
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	/*
	 * NOTE(review): fragment of a ring-resize path (ethtool
	 * set_ringparam style); header and locking context not visible.
	 */
	/* set the minimum ring size to 4, to allow the loopback test to work
	 * unchanged.
	 */
	/* round the requested Tx count up to the next power of two (log2 in i) */
	for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
		if (size <= (1 << i))
			break;
	}
	if ((1 << i) != lp->tx_ring_size)
		pcnet32_realloc_tx_ring(dev, lp, i);

	/* same rounding for the Rx ring */
	size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
	for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
		if (size <= (1 << i))
			break;
	}
	if ((1 << i) != lp->rx_ring_size)
		pcnet32_realloc_rx_ring(dev, lp, i);

	lp->napi.weight = lp->rx_ring_size / 2;

	if (netif_running(dev)) {
		pcnet32_netif_start(dev);
		pcnet32_restart(dev, CSR0_NORMAL);
	}
	/*
	 * NOTE(review): fragment of the internal loopback self-test; the
	 * function header, local declarations and cleanup label live outside
	 * this chunk.  Called with lp->lock held (it is dropped around msleep).
	 */
	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */

	/* Initialize Transmit buffers. */
	size = data_len + 15;
	for (x = 0; x < numbuffs; x++) {
		skb = netdev_alloc_skb(dev, size);
		if (!skb) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Cannot allocate skb at line: %d!\n",
				     __LINE__);
			goto clean_up;
		}
		packet = skb->data;
		skb_put(skb, size);	/* create space for data */
		lp->tx_skbuff[x] = skb;
		/* descriptor length field holds the two's complement of len */
		lp->tx_ring[x].length = cpu_to_le16(-skb->len);
		lp->tx_ring[x].misc = 0;

		/* put DA and SA into the skb */
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		for (i = 0; i < 6; i++)
			*packet++ = dev->dev_addr[i];
		/* type */
		*packet++ = 0x08;
		*packet++ = 0x06;
		/* packet number */
		*packet++ = x;
		/* fill packet with data */
		for (i = 0; i < data_len; i++)
			*packet++ = i;

		lp->tx_dma_addr[x] =
		    dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "DMA mapping error at line: %d!\n",
				     __LINE__);
			goto clean_up;
		}
		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
		wmb();	/* Make sure owner changes after all others are visible */
		lp->tx_ring[x].status = cpu_to_le16(status);
	}

	x = a->read_bcr(ioaddr, 32);	/* set internal loopback in BCR32 */
	a->write_bcr(ioaddr, 32, x | 0x0002);

	/* set int loopback in CSR15 */
	x = a->read_csr(ioaddr, CSR15) & 0xfffc;
	lp->a->write_csr(ioaddr, CSR15, x | 0x0044);

	teststatus = cpu_to_le16(0x8000);
	lp->a->write_csr(ioaddr, CSR0, CSR0_START);	/* Set STRT bit */

	/* Check status of descriptors: poll until the chip clears the OWN
	 * bit on each Rx descriptor, sleeping (lock dropped) between polls. */
	for (x = 0; x < numbuffs; x++) {
		ticks = 0;
		rmb();
		while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
			spin_unlock_irqrestore(&lp->lock, flags);
			msleep(1);
			spin_lock_irqsave(&lp->lock, flags);
			rmb();
			ticks++;
		}
		if (ticks == 200) {
			netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
			break;
		}
	}

	lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */
	wmb();
	if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
		netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");

		for (x = 0; x < numbuffs; x++) {
			netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
			skb = lp->rx_skbuff[x];
			for (i = 0; i < size; i++)
				pr_cont(" %02x", *(skb->data + i));
			pr_cont("\n");
		}
	}

	/* compare each received buffer byte-for-byte with what was sent */
	x = 0;
	rc = 0;
	while (x < numbuffs && !rc) {
		skb = lp->rx_skbuff[x];
		packet = lp->tx_skbuff[x]->data;
		for (i = 0; i < size; i++) {
			if (*(skb->data + i) != packet[i]) {
				netif_printk(lp, hw, KERN_DEBUG, dev,
					     "Error in compare! %2x - %02x %02x\n",
					     i, *(skb->data + i), packet[i]);
				rc = 1;
				break;
			}
		}
		x++;
	}
	/*
	 * NOTE(review): fragment — ethtool LED-identify handler body
	 * (ETHTOOL_ID_* state machine); function header not visible here.
	 * Blinks by XOR-toggling bit 14 of BCR4..BCR7, using save_regs[]
	 * to restore the original values afterwards.
	 */
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
		spin_unlock_irqrestore(&lp->lock, flags);
		return 2;	/* cycle on/off twice per second */

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_OFF:
		/* Blink the led */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
		spin_unlock_irqrestore(&lp->lock, flags);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the original value of the bcrs */
		spin_lock_irqsave(&lp->lock, flags);
		for (i = 4; i < 8; i++)
			a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
		spin_unlock_irqrestore(&lp->lock, flags);
	}
	return 0;
}
/*
 * process one receive descriptor entry
 */
static void pcnet32_rx_entry(struct net_device *dev,
			     struct pcnet32_private *lp,
			     struct pcnet32_rx_head *rxp,
			     int entry)
{
	/* upper byte of the status word holds the ENP/STP/error flags */
	int status = (short)le16_to_cpu(rxp->status) >> 8;
	int rx_in_place = 0;
	struct sk_buff *skb;
	short pkt_len;

	if (status != 0x03) {	/* There was an error. */
		/*
		 * There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: Even with full-sized
		 * buffers it's possible for a jabber packet to use two
		 * buffers, with only the last correctly noting the error.
		 */
		if (status & 0x01)	/* Only count a general error at the */
			dev->stats.rx_errors++;	/* end of a packet. */
		if (status & 0x20)
			dev->stats.rx_frame_errors++;
		if (status & 0x10)
			dev->stats.rx_over_errors++;
		if (status & 0x08)
			dev->stats.rx_crc_errors++;
		if (status & 0x04)
			dev->stats.rx_fifo_errors++;
		return;
	}
	/*
	 * NOTE(review): truncated — the good-packet path (skb handling,
	 * rx_in_place / pkt_len use, closing brace) is missing from this
	 * chunk of the file.
	 */
/*
 * Harvest completed Rx descriptors, up to the NAPI budget.
 * Returns the number of descriptors processed.
 */
static int pcnet32_rx(struct net_device *dev, int budget)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int ring_idx = lp->cur_rx & lp->rx_mod_mask;
	struct pcnet32_rx_head *desc = &lp->rx_ring[ring_idx];
	int received = 0;

	/* A clear high bit in the status word means the host owns the
	 * descriptor and it carries a new packet.  Stop at the budget or at
	 * the first descriptor still owned by the chip. */
	for (;;) {
		if (received >= budget)
			break;
		if ((short)le16_to_cpu(desc->status) < 0)
			break;	/* chip still owns it */

		pcnet32_rx_entry(dev, lp, desc, ring_idx);
		received++;

		/*
		 * The docs say that the buffer length isn't touched, but Andrew
		 * Boyd of QNX reports that some revs of the 79C965 clear it.
		 */
		desc->buf_length = cpu_to_le16(NEG_BUF_SIZE);
		wmb();	/* Make sure owner changes after others are visible */
		desc->status = cpu_to_le16(0x8000);	/* hand back to chip */

		ring_idx = (++lp->cur_rx) & lp->rx_mod_mask;
		desc = &lp->rx_ring[ring_idx];
	}

	return received;
}
/*
 * Reclaim completed Tx descriptors: record statistics, unmap the DMA
 * buffers and free the skbs.
 * NOTE(review): truncated — the tail of this function (dirty_tx/delta
 * accounting, queue wake-up, must_restart handling and the return
 * statement) is missing from this chunk of the file.
 */
static int pcnet32_tx(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	unsigned int dirty_tx = lp->dirty_tx;
	int delta;
	int must_restart = 0;

	while (dirty_tx != lp->cur_tx) {
		int entry = dirty_tx & lp->tx_mod_mask;
		int status = (short)le16_to_cpu(lp->tx_ring[entry].status);

		if (status < 0)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[entry].base = 0;

		if (status & 0x4000) {
			/* There was a major error, log it. */
			int err_status = le32_to_cpu(lp->tx_ring[entry].misc);

			dev->stats.tx_errors++;
			netif_err(lp, tx_err, dev,
				  "Tx error status=%04x err_status=%08x\n",
				  status, err_status);
			if (err_status & 0x04000000)
				dev->stats.tx_aborted_errors++;
			if (err_status & 0x08000000)
				dev->stats.tx_carrier_errors++;
			if (err_status & 0x10000000)
				dev->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				/* Ackk! On FIFO errors the Tx unit is turned off! */
				/* Remove this verbosity later! */
				netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
				must_restart = 1;
			}
#else
			if (err_status & 0x40000000) {
				dev->stats.tx_fifo_errors++;
				if (!lp->dxsuflo) {	/* If controller doesn't recover ... */
					/* Ackk! On FIFO errors the Tx unit is turned off! */
					/* Remove this verbosity later! */
					netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
					must_restart = 1;
				}
			}
#endif
		} else {
			if (status & 0x1800)
				dev->stats.collisions++;
			dev->stats.tx_packets++;
		}

		/* We must free the original skb */
		if (lp->tx_skbuff[entry]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[entry],
					 lp->tx_skbuff[entry]->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(lp->tx_skbuff[entry]);
			lp->tx_skbuff[entry] = NULL;
			lp->tx_dma_addr[entry] = 0;
		}
		dirty_tx++;
	}
	/*
	 * NOTE(review): fragment of the NAPI poll path; the function header
	 * and the Rx portion are not visible in this chunk.
	 */
	spin_lock_irqsave(&lp->lock, flags);
	if (pcnet32_tx(dev)) {
		/* reset the chip to clear the error condition, then restart */
		lp->a->reset(ioaddr);
		lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
		pcnet32_restart(dev, CSR0_START);
		netif_wake_queue(dev);
	}

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* clear interrupt masks */
		val = lp->a->read_csr(ioaddr, CSR3);
		val &= 0x00ff;
		lp->a->write_csr(ioaddr, CSR3, val);

		/* Set interrupt enable. */
		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
	}
	/*
	 * NOTE(review): fragment of the PCI probe routine; the function
	 * header and the final return are not visible in this chunk.
	 */
	err = pci_enable_device(pdev);
	if (err < 0) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("failed to enable device -- err=%d\n", err);
		return err;
	}
	pci_set_master(pdev);

	/* BAR 0 is the card's I/O port range; bail out if absent */
	if (!pci_resource_len(pdev, 0)) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("card has no PCI IO resources, aborting\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
	if (err) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
		goto err_disable_dev;
	}
	ioaddr = pci_resource_start(pdev, 0);
	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("io address range already allocated\n");
		err = -EBUSY;
		goto err_disable_dev;
	}

	err = pcnet32_probe1(ioaddr, 1, pdev);

	/* goto-based cleanup: undo pci_enable_device on any failure */
err_disable_dev:
	if (err < 0)
		pci_disable_device(pdev);
/* pcnet32_probe1
 * Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
 * pdev will be NULL when called from pcnet32_probe_vlbus.
 *
 * Identifies the chip variant, reads the MAC address, allocates the
 * net_device and rings, and programs the init block.
 * NOTE(review): this function is truncated in this chunk — its tail
 * (register_netdev, error labels err_free_ring/err_free_netdev/
 * err_release_region, final return) is not visible here.
 */
static int
pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
	struct pcnet32_private *lp;
	int i, media;
	int fdx, mii, fset, dxsuflo, sram;
	int chip_version;
	char *chipname;
	struct net_device *dev;
	const struct pcnet32_access *a = NULL;
	u8 promaddr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	int ret = -ENODEV;

	/* reset the chip */
	pcnet32_wio_reset(ioaddr);

	/* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
	if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
		a = &pcnet32_wio;
	} else {
		pcnet32_dwio_reset(ioaddr);
		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
		    pcnet32_dwio_check(ioaddr)) {
			a = &pcnet32_dwio;
		} else {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_err("No access methods\n");
			goto err_release_region;
		}
	}

	/* chip version lives in CSR88 (low 16 bits) and CSR89 (high bits) */
	chip_version =
	    a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
	if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
		pr_info(" PCnet chip version is %#x\n", chip_version);
	if ((chip_version & 0xfff) != 0x003) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("Unsupported chip version\n");
		goto err_release_region;
	}

	/* map the chip id to a name and per-variant feature flags */
	switch (chip_version) {
	case 0x2420:
		chipname = "PCnet/PCI 79C970";	/* PCI */
		break;
	case 0x2430:
		if (shared)
			chipname = "PCnet/PCI 79C970";	/* 970 gives the wrong chip id back */
		else
			chipname = "PCnet/32 79C965";	/* 486/VL bus */
		break;
	case 0x2621:
		chipname = "PCnet/PCI II 79C970A";	/* PCI */
		fdx = 1;
		break;
	case 0x2623:
		chipname = "PCnet/FAST 79C971";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2624:
		chipname = "PCnet/FAST+ 79C972";	/* PCI */
		fdx = 1;
		mii = 1;
		fset = 1;
		break;
	case 0x2625:
		chipname = "PCnet/FAST III 79C973";	/* PCI */
		fdx = 1;
		mii = 1;
		sram = 1;
		break;
	case 0x2626:
		chipname = "PCnet/Home 79C978";	/* PCI */
		fdx = 1;
		/*
		 * This is based on specs published at www.amd.com. This section
		 * assumes that a card with a 79C978 wants to go into standard
		 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
		 * and the module option homepna=1 can select this instead.
		 */
		media = a->read_bcr(ioaddr, 49);
		media &= ~3;	/* default to 10Mb ethernet */
		if (cards_found < MAX_UNITS && homepna[cards_found])
			media |= 1;	/* switch to home wiring mode */
		if (pcnet32_debug & NETIF_MSG_PROBE)
			printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
			       (media & 1) ? "1" : "10");
		a->write_bcr(ioaddr, 49, media);
		break;
	case 0x2627:
		chipname = "PCnet/FAST III 79C975";	/* PCI */
		fdx = 1;
		mii = 1;
		sram = 1;
		break;
	case 0x2628:
		chipname = "PCnet/PRO 79C976";
		fdx = 1;
		mii = 1;
		break;
	default:
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_info("PCnet version %#x, no PCnet32 chip\n",
				chip_version);
		goto err_release_region;
	}

	/*
	 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
	 * starting until the packet is loaded. Strike one for reliability, lose
	 * one for latency - although on PCI this isn't a big loss. Older chips
	 * have FIFO's smaller than a packet, so you can't do this.
	 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
	 * NOTE(review): the code block using the fset flag appears to be
	 * missing from this chunk; only the comment survives here.
	 */

	/*
	 * The Am79C973/Am79C975 controllers come with 12K of SRAM
	 * which we can use for the Tx/Rx buffers but most importantly,
	 * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid
	 * Tx fifo underflows.
	 */
	if (sram) {
		/*
		 * The SRAM is being configured in two steps. First we
		 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
		 * to the datasheet, each bit corresponds to a 512-byte
		 * page so we can have at most 24 pages. The SRAM_SIZE
		 * holds the value of the upper 8 bits of the 16-bit SRAM size.
		 * The low 8-bits start at 0x00 and end at 0xff. So the
		 * address range is from 0x0000 up to 0x17ff. Therefore,
		 * the SRAM_SIZE is set to 0x17. The next step is to set
		 * the BCR26:SRAM_BND midway through so the Tx and Rx
		 * buffers can share the SRAM equally.
		 */
		a->write_bcr(ioaddr, 25, 0x17);
		a->write_bcr(ioaddr, 26, 0xc);
		/* And finally enable the NOUFLO bit */
		a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
	}

	dev = alloc_etherdev(sizeof(*lp));
	if (!dev) {
		ret = -ENOMEM;
		goto err_release_region;
	}

	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	if (pcnet32_debug & NETIF_MSG_PROBE)
		pr_info("%s at %#3lx,", chipname, ioaddr);

	/* In most chips, after a chip reset, the ethernet address is read from
	 * the station address PROM at the base address and programmed into the
	 * "Physical Address Registers" CSR12-14.
	 * As a precautionary measure, we read the PROM values and complain if
	 * they disagree with the CSRs.  If they miscompare, and the PROM addr
	 * is valid, then the PROM addr is used.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int val;
		val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
		/* There may be endianness issues here. */
		addr[2 * i] = val & 0x0ff;
		addr[2 * i + 1] = (val >> 8) & 0x0ff;
	}
	eth_hw_addr_set(dev, addr);

	/* read PROM address and compare with CSR address */
	for (i = 0; i < ETH_ALEN; i++)
		promaddr[i] = inb(ioaddr + i);

	if (!ether_addr_equal(promaddr, dev->dev_addr) ||
	    !is_valid_ether_addr(dev->dev_addr)) {
		if (is_valid_ether_addr(promaddr)) {
			if (pcnet32_debug & NETIF_MSG_PROBE) {
				pr_cont(" warning: CSR address invalid,\n");
				pr_info(" using instead PROM address of");
			}
			eth_hw_addr_set(dev, promaddr);
		}
	}

	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		static const u8 zero_addr[ETH_ALEN] = {};

		eth_hw_addr_set(dev, zero_addr);
	}

	if (pcnet32_debug & NETIF_MSG_PROBE) {
		pr_cont(" %pM", dev->dev_addr);

		/* Version 0x2623 and 0x2624 */
		if (((chip_version + 1) & 0xfffe) == 0x2624) {
			i = a->read_csr(ioaddr, 80) & 0x0C00;	/* Check tx_start_pt */
			pr_info(" tx_start_pt(0x%04x):", i);
			switch (i >> 10) {
			case 0:
				pr_cont(" 20 bytes,");
				break;
			case 1:
				pr_cont(" 64 bytes,");
				break;
			case 2:
				pr_cont(" 128 bytes,");
				break;
			case 3:
				pr_cont("~220 bytes,");
				break;
			}
			i = a->read_bcr(ioaddr, 18);	/* Check Burst/Bus control */
			pr_cont(" BCR18(%x):", i & 0xffff);
			if (i & (1 << 5))
				pr_cont("BurstWrEn ");
			if (i & (1 << 6))
				pr_cont("BurstRdEn ");
			if (i & (1 << 7))
				pr_cont("DWordIO ");
			if (i & (1 << 11))
				pr_cont("NoUFlow ");
			i = a->read_bcr(ioaddr, 25);
			pr_info(" SRAMSIZE=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 26);
			pr_cont(" SRAM_BND=0x%04x,", i << 8);
			i = a->read_bcr(ioaddr, 27);
			if (i & (1 << 14))
				pr_cont("LowLatRx");
		}
	}

	dev->base_addr = ioaddr;
	lp = netdev_priv(dev);
	/* dma_alloc_coherent returns page-aligned memory, so we do not have
	 * to check the alignment */
	lp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(*lp->init_block),
					    &lp->init_dma_addr, GFP_KERNEL);
	if (!lp->init_block) {
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_err("Coherent memory allocation failed\n");
		ret = -ENOMEM;
		goto err_free_netdev;
	}
	lp->pci_dev = pdev;

	lp->dev = dev;

	spin_lock_init(&lp->lock);

	lp->name = chipname;
	lp->shared_irq = shared;
	lp->tx_ring_size = TX_RING_SIZE;	/* default tx ring size */
	lp->rx_ring_size = RX_RING_SIZE;	/* default rx ring size */
	lp->tx_mod_mask = lp->tx_ring_size - 1;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
	lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
	lp->mii_if.full_duplex = fdx;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;
	lp->dxsuflo = dxsuflo;
	lp->mii = mii;
	lp->chip_version = chip_version;
	lp->msg_enable = pcnet32_debug;
	if ((cards_found >= MAX_UNITS) ||
	    (options[cards_found] >= sizeof(options_mapping)))
		lp->options = PCNET32_PORT_ASEL;
	else
		lp->options = options_mapping[options[cards_found]];
	/* force default port to TP on 79C970A so link detection can work */
	if (lp->chip_version == PCNET32_79C970A)
		lp->options = PCNET32_PORT_10BT;

	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;

	/* napi.weight is used in both the napi and non-napi cases */
	lp->napi.weight = lp->rx_ring_size / 2;

	/* prior to register_netdev, dev->name is not yet correct */
	if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
		ret = -ENOMEM;
		goto err_free_ring;
	}
	/* detect special T1/E1 WAN card by checking for MAC address */
	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
	    dev->dev_addr[2] == 0x75)
		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;

	lp->init_block->mode = cpu_to_le16(0x0003);	/* Disable Rx and Tx. */
	lp->init_block->tlen_rlen =
	    cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
	for (i = 0; i < 6; i++)
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	lp->init_block->filter[0] = 0x00000000;
	lp->init_block->filter[1] = 0x00000000;
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);

	/* switch pcnet32 to 32bit mode */
	a->write_bcr(ioaddr, 20, 2);

	if (pdev) {		/* use the IRQ provided by PCI */
		dev->irq = pdev->irq;
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(" assigned IRQ %d\n", dev->irq);
	} else {
		unsigned long irq_mask = probe_irq_on();

		/*
		 * To auto-IRQ we enable the initialization-done and DMA error
		 * interrupts. For ISA boards we get a DMA error, but VLB and
		 * PCI boards will work.
		 */
		/* Trigger an initialization just for the interrupt. */
		a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
		mdelay(1);

		dev->irq = probe_irq_off(irq_mask);
		if (!dev->irq) {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_cont(", failed to detect IRQ line\n");
			ret = -ENODEV;
			goto err_free_ring;
		}
		if (pcnet32_debug & NETIF_MSG_PROBE)
			pr_cont(", probed IRQ %d\n", dev->irq);
	}

	/* Set the mii phy_id so that we can query the link state */
	if (lp->mii) {
		/* lp->phycount and lp->phymask are set to 0 by memset above */
		lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
		/* scan for PHYs */
		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
			unsigned short id1, id2;

			id1 = mdio_read(dev, i, MII_PHYSID1);
			if (id1 == 0xffff)
				continue;
			id2 = mdio_read(dev, i, MII_PHYSID2);
			if (id2 == 0xffff)
				continue;
			if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
				continue;	/* 79C971 & 79C972 have phantom phy at id 31 */
			lp->phycount++;
			lp->phymask |= (1 << i);
			lp->mii_if.phy_id = i;
			if (pcnet32_debug & NETIF_MSG_PROBE)
				pr_info("Found PHY %04x:%04x at address %d\n",
					id1, id2, i);
		}
		lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
		if (lp->phycount > 1)
			lp->options |= PCNET32_PORT_MII;
	}
	/*
	 * NOTE(review): fragment — body of the device-open path; the function
	 * header, IRQ request and error labels are not visible in this chunk,
	 * and the multi-PHY branch below is internally truncated (its closing
	 * brace and part of its body are missing).
	 */
	netdev_lock(dev);
	spin_lock_irqsave(&lp->lock, flags);
	/* Check for a valid station address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		rc = -EINVAL;
		goto err_free_irq;
	}

	/* Reset the PCNET32 */
	lp->a->reset(ioaddr);

	/* switch pcnet32 to 32bit mode */
	lp->a->write_bcr(ioaddr, 20, 2);

	/* set/reset autoselect bit */
	val = lp->a->read_bcr(ioaddr, 2) & ~2;
	if (lp->options & PCNET32_PORT_ASEL)
		val |= 2;
	lp->a->write_bcr(ioaddr, 2, val);

	/* handle full duplex setting */
	if (lp->mii_if.full_duplex) {
		val = lp->a->read_bcr(ioaddr, 9) & ~3;
		if (lp->options & PCNET32_PORT_FD) {
			val |= 1;
			if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
				val |= 2;
		} else if (lp->options & PCNET32_PORT_ASEL) {
			/* workaround of xSeries250, turn on for 79C975 only */
			if (lp->chip_version == 0x2627)
				val |= 3;
		}
		lp->a->write_bcr(ioaddr, 9, val);
	}

	/* set/reset GPSI bit in test register */
	val = lp->a->read_csr(ioaddr, 124) & ~0x10;
	if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
		val |= 0x10;
	lp->a->write_csr(ioaddr, 124, val);

	/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
	if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
	    (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
	     pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
		if (lp->options & PCNET32_PORT_ASEL) {
			lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
			netif_printk(lp, link, KERN_DEBUG, dev,
				     "Setting 100Mb-Full Duplex\n");
		}
	}
	if (lp->phycount < 2) {
		/*
		 * 24 Jun 2004 according AMD, in order to change the PHY,
		 * DANAS (or DISPM for 79C976) must be set; then select the
		 * speed, duplex, and/or enable auto negotiation, and clear
		 * DANAS
		 */
		if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
			lp->a->write_bcr(ioaddr, 32,
					 lp->a->read_bcr(ioaddr, 32) | 0x0080);
			/* disable Auto Negotiation, set 10Mpbs, HD */
			val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
			if (lp->options & PCNET32_PORT_FD)
				val |= 0x10;
			if (lp->options & PCNET32_PORT_100)
				val |= 0x08;
			lp->a->write_bcr(ioaddr, 32, val);
		} else {
			if (lp->options & PCNET32_PORT_ASEL) {
				lp->a->write_bcr(ioaddr, 32,
						 lp->a->read_bcr(ioaddr,
								 32) | 0x0080);
				/* enable auto negotiate, setup, disable fd */
				val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
				val |= 0x20;
				lp->a->write_bcr(ioaddr, 32, val);
			}
		}
	} else {
		int first_phy = -1;
		u16 bmcr;
		u32 bcr9;
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

		/*
		 * There is really no good other way to handle multiple PHYs
		 * other than turning off all automatics
		 */
		val = lp->a->read_bcr(ioaddr, 2);
		lp->a->write_bcr(ioaddr, 2, val & ~2);
		val = lp->a->read_bcr(ioaddr, 32);
		lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7));	/* stop MII manager */

	/*
	 * NOTE(review): the rest of the multi-PHY setup (and the brace that
	 * closes the else-block above) is missing from this chunk; the text
	 * resumes at the ring initialization below.
	 */
	if (pcnet32_init_ring(dev)) {
		rc = -ENOMEM;
		goto err_free_ring;
	}

	napi_enable_locked(&lp->napi);

	/* Re-initialize the PCNET32, and start it when done. */
	lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
	lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));

	lp->a->write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */
	lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);

	netif_start_queue(dev);

	if (lp->chip_version >= PCNET32_79C970A) {
		/* Print the link status and start the watchdog */
		pcnet32_check_media(dev, 1);
		mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
	}

	i = 0;
	while (i++ < 100)
		if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
/*
 * NOTE(review): the following German text is residue from a web page and is
 * not part of the driver source; it is preserved here as a comment so the
 * file stays compilable.  (Translation: "The information on this web page
 * was carefully compiled to the best of our knowledge.  However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed.  Note: the colored syntax display and the measurement are
 * still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */