/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
/* The possible media types that can be set in options[] are: */
const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
	"", "", "", "",
	"", "", "", "",
	"", "", "", "Transceiver reset",
};
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure.
   On platforms where unaligned DMA buffers are expensive, copy every
   packet (i.e. anything up to a full 1518-byte frame); elsewhere only
   copy tiny packets and hand larger ones up zero-copy. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
/* Set the bus performance register.
	Typical: Set 16 longword cache alignment, no burst limit.
	Cache alignment bits 15:14	     Burst length 13:8
		0000	No alignment  0x00000000 unlimited		0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	Warning: many older 486 systems are broken and require setting 0x00A04800
	   8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/
#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
 * crossing anyways so it makes no sense to tell Tulip to burst
 * any more than that.
 */
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
/* Unknown architecture: leave csr0 zero and let the per-chip probe
 * quirks pick a safe value. */
static int csr0;
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
/* Wake the chip from sleep/snooze mode. */
tulip_set_power_state (tp, 0, 0);
/* Disable all WOL events */
pci_enable_wake(tp->pdev, PCI_D3hot, 0);
pci_enable_wake(tp->pdev, PCI_D3cold, 0);
tulip_set_wolopts(tp->pdev, 0);
/* On some chip revs we must set the MII/SYM port before the reset!? */ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
iowrite32(0x00040000, ioaddr + CSR6);
/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
iowrite32(0x00000001, ioaddr + CSR0);
pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */
udelay(100);
/* Deassert reset. Wait the specified 50 PCI cycles after a reset by initializing
Tx and Rx queues and the address filter list. */
iowrite32(tp->csr0, ioaddr + CSR0);
pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */
udelay(100);
if (tulip_debug > 1)
netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
/* 21140 bug: you must add the broadcast address. */
memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame)); /* Fill the final entry of the table with our physical address. */
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
tp->timer.expires = RUN_AT(next_tick);
add_timer(&tp->timer); #ifdef CONFIG_TULIP_NAPI
timer_setup(&tp->oom_timer, oom_timer, 0); #endif
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void tulip_init_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int idx;

	tp->susp_rx = 0;
	tp->ttimer = 0;
	tp->nir = 0;

	/* First pass: chain every Rx descriptor to its successor via
	 * buffer2; the skb data buffers are attached in the second pass. */
	for (idx = 0; idx < RX_RING_SIZE; idx++) {
		tp->rx_ring[idx].status = 0x00000000;
		tp->rx_ring[idx].length = cpu_to_le32(PKT_BUF_SZ);
		tp->rx_ring[idx].buffer2 = cpu_to_le32(tp->rx_ring_dma +
						       sizeof(struct tulip_rx_desc) * (idx + 1));
		tp->rx_buffers[idx].skb = NULL;
		tp->rx_buffers[idx].mapping = 0;
	}
	/* Close the loop: the final descriptor wraps back to the ring base. */
	tp->rx_ring[RX_RING_SIZE - 1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[RX_RING_SIZE - 1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	/* Second pass: allocate and DMA-map an skb per descriptor, then
	 * hand the descriptor to the chip. */
	for (idx = 0; idx < RX_RING_SIZE; idx++) {
		dma_addr_t dma;
		/* Note the receive buffer must be longword aligned.
		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

		tp->rx_buffers[idx].skb = skb;
		if (skb == NULL)
			break;	/* out of memory; run with a partial ring */
		dma = dma_map_single(&tp->pdev->dev, skb->data,
				     PKT_BUF_SZ, DMA_FROM_DEVICE);
		tp->rx_buffers[idx].mapping = dma;
		tp->rx_ring[idx].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
		tp->rx_ring[idx].buffer1 = cpu_to_le32(dma);
	}
	/* Negative (mod 2^32) count of Rx slots still without a buffer. */
	tp->dirty_rx = (unsigned int)(idx - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (idx = 0; idx < TX_RING_SIZE; idx++) {
		tp->tx_buffers[idx].skb = NULL;
		tp->tx_buffers[idx].mapping = 0;
		tp->tx_ring[idx].status = 0x00000000;
		tp->tx_ring[idx].buffer2 = cpu_to_le32(tp->tx_ring_dma +
						       sizeof(struct tulip_tx_desc) * (idx + 1));
	}
	tp->tx_ring[TX_RING_SIZE - 1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}
if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
flag = 0x60000000; /* No interrupt */
} elseif (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
flag = 0xe0000000; /* Tx-done intr. */
} elseif (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
flag = 0x60000000; /* No Tx-done intr. */
} else { /* Leave room for set_rx_mode() to fill entries. */
flag = 0xe0000000; /* Tx-done intr. */
netif_stop_queue(dev);
} if (entry == TX_RING_SIZE-1)
flag = 0xe0000000 | DESC_RING_WRAP;
tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag); /* if we were using Transmit Automatic Polling, we would need a
* wmb() here. */
tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
wmb();
for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
dirty_tx++) { int entry = dirty_tx % TX_RING_SIZE; int status = le32_to_cpu(tp->tx_ring[entry].status);
if (status < 0) {
tp->dev->stats.tx_errors++; /* It wasn't Txed */
tp->tx_ring[entry].status = 0;
}
/* Check for Tx filter setup frames. */ if (tp->tx_buffers[entry].skb == NULL) { /* test because dummy frames not mapped */ if (tp->tx_buffers[entry].mapping)
dma_unmap_single(&tp->pdev->dev,
tp->tx_buffers[entry].mapping, sizeof(tp->setup_frame),
DMA_TO_DEVICE); continue;
}
/* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = tp->rx_buffers[i].skb;
dma_addr_t mapping = tp->rx_buffers[i].mapping;
case SIOCSMIIREG: /* Write MII PHY register. */ if (regnum & ~0x1f) return -EINVAL; if (data->phy_id == phy) {
u16 value = data->val_in; switch (regnum) { case 0: /* Check for autonegotiation on or reset. */
tp->full_duplex_lock = (value & 0x9000) ? 0 : 1; if (tp->full_duplex_lock)
tp->full_duplex = (value & 0x0100) ? 1 : 0; break; case 4:
tp->advertising[phy_idx] =
tp->mii_advertise = data->val_in; break;
}
} if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
u16 value = data->val_in; if (regnum == 0) { if ((value & 0x1200) == 0x1200) { if (tp->chip_id == PNIC2) {
pnic2_start_nway (dev);
} else {
t21142_start_nway (dev);
}
}
} elseif (regnum == 4)
tp->sym_advertise = value;
} else {
tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
} return 0; default: return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
/* Set or clear the multicast filter for this adaptor. Note that we only use exclusion around actually queueing the new frame, not around filling tp->setup_frame. This is non-deterministic
when re-entered but still correct. */
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */ /* This should work on big-endian machines as well. */
netdev_for_each_mc_addr(ha, dev) { int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
__set_bit_le(index, hash_table);
} for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
*setup_frm++ = hash_table[i];
}
setup_frm = &tp->setup_frame[13*6];
/* Fill the final entry with our physical address. */
eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */ if (netdev_mc_count(dev) > 14) { /* Must use a multicast hash table. */
build_setup_frame_hash(tp->setup_frame, dev);
tx_flags = 0x08400000 | 192;
} else {
build_setup_frame_perfect(tp->setup_frame, dev);
}
spin_lock_irqsave(&tp->lock, flags);
if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) { /* Same setup recently queued, we need not add it. */
} else { unsignedint entry; int dummy = -1;
/* Now add this frame to the Tx list. */
entry = tp->cur_tx++ % TX_RING_SIZE;
if (entry != 0) { /* Avoid a chip errata by prefixing a dummy entry. */
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping = 0;
tp->tx_ring[entry].length =
(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
tp->tx_ring[entry].buffer1 = 0; /* Must set DescOwned later to avoid race with chip */
dummy = entry;
entry = tp->cur_tx++ % TX_RING_SIZE;
}
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping =
dma_map_single(&tp->pdev->dev,
tp->setup_frame, sizeof(tp->setup_frame),
DMA_TO_DEVICE); /* Put the setup frame on the Tx list. */ if (entry == TX_RING_SIZE-1)
tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
tp->tx_ring[entry].buffer1 =
cpu_to_le32(tp->tx_buffers[entry].mapping);
tp->tx_ring[entry].status = cpu_to_le32(DescOwned); if (dummy >= 0)
tp->tx_ring[dummy].status = cpu_to_le32(DescOwned); if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
netif_stop_queue(dev);
if (tulip_debug > 3)
netdev_dbg(dev, "tulip_mwi_config()\n");
tp->csr0 = csr0 = 0;
/* if we have any cache line size at all, we can do MRM and MWI */
csr0 |= MRM | MWI;
/* Enable MWI in the standard PCI command bit. * Check for the case where MWI is desired but not available
*/
pci_try_set_mwi(pdev);
/* read result from hardware (in case bit refused to enable) */
pci_read_config_word(pdev, PCI_COMMAND, &pci_command); if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
csr0 &= ~MWI;
/* if cache line size hardwired to zero, no MWI */
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache); if ((csr0 & MWI) && (cache == 0)) {
csr0 &= ~MWI;
pci_clear_mwi(pdev);
}
/* * DM910x chips should be handled by the dmfe driver, except * on-board chips on SPARC systems. Also, early DM9100s need * software CRC which only the dmfe driver supports.
*/
#ifdef CONFIG_TULIP_DM910X if (chip_idx == DM910X) { struct device_node *dp;
if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
pdev->revision < 0x30) {
pr_info("skipping early DM9100 with Crc bug (use dmfe)\n"); return -ENODEV;
}
/* * Looks for early PCI chipsets where people report hangs * without the workarounds being on.
*/
/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache aligned. Aries might need this too. The Saturn errata are not pretty reading but thankfully it's an old 486 chipset.
2. The dreaded SiS496 486 chipset. Same workaround as Intel Saturn.
*/
/* bugfix: the ASIX must have a burst limit or horrible things happen. */ if (chip_idx == AX88140) { if ((csr0 & 0x3f00) == 0)
csr0 |= 0x2000;
}
/* PNIC doesn't have MWI/MRL/MRM... */ if (chip_idx == LC82C168)
csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ if (tulip_uli_dm_quirk(pdev)) {
csr0 &= ~0x01f100ff; #ifdefined(CONFIG_SPARC)
csr0 = (csr0 & ~0xff00) | 0xe000; #endif
} /* * And back to business
*/
i = pcim_enable_device(pdev); if (i) {
pr_err("Cannot enable tulip board #%d, aborting\n", board_idx); return i;
}
irq = pdev->irq;
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp)); if (!dev) return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev); if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
pci_name(pdev),
(unsignedlonglong)pci_resource_len (pdev, 0),
(unsignedlonglong)pci_resource_start (pdev, 0)); return -ENODEV;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */ if (pcim_request_all_regions(pdev, DRV_NAME)) return -ENODEV;
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
pci_set_master(pdev);
#ifdef CONFIG_GSC if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) { switch (pdev->subsystem_device) { default: break; case 0x1061: case 0x1062: case 0x1063: case 0x1098: case 0x1099: case 0x10EE:
tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
chip_name = "GSC DS21140 Tulip";
}
} #endif
/* Clear the missed-packet counter. */
ioread32(ioaddr + CSR8);
/* The station address ROM is read byte serially. The register must be polled, waiting for the value to be read bit serially from the EEPROM.
*/
ee_data = tp->eeprom;
memset(ee_data, 0, sizeof(tp->eeprom));
sum = 0; if (chip_idx == LC82C168) { for (i = 0; i < 3; i++) { int value, boguscnt = 100000;
iowrite32(0x600 | i, ioaddr + 0x98); do {
value = ioread32(ioaddr + CSR9);
} while (value < 0 && --boguscnt > 0);
put_unaligned_le16(value, ((__le16 *)addr) + i);
sum += value & 0xffff;
}
eth_hw_addr_set(dev, addr);
} elseif (chip_idx == COMET) { /* No need to read the EEPROM. */
put_unaligned_le32(ioread32(ioaddr + 0xA4), addr);
put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4);
eth_hw_addr_set(dev, addr); for (i = 0; i < 6; i ++)
sum += dev->dev_addr[i];
} else { /* A serial EEPROM interface, we read now and sort it out later. */ int sa_offset = 0; int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6; int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
if (ee_max_addr > sizeof(tp->eeprom))
ee_max_addr = sizeof(tp->eeprom);
for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
ee_data[i] = data & 0xff;
ee_data[i + 1] = data >> 8;
}
/* DEC now has a specification (see Notes) but early board makers
just put the address in the first EEPROM locations. */ /* This does memcmp(ee_data, ee_data+16, 8) */ for (i = 0; i < 8; i ++) if (ee_data[i] != ee_data[16+i])
sa_offset = 20; if (chip_idx == CONEXANT) { /* Check that the tuple type and length is correct. */ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
sa_offset = 0x19A;
} elseif (ee_data[0] == 0xff && ee_data[1] == 0xff &&
ee_data[2] == 0) {
sa_offset = 2; /* Grrr, damn Matrox boards. */
} #ifdef CONFIG_MIPS_COBALT if ((pdev->bus->number == 0) &&
((PCI_SLOT(pdev->devfn) == 7) ||
(PCI_SLOT(pdev->devfn) == 12))) { /* Cobalt MAC address in first EEPROM locations. */
sa_offset = 0; /* Ensure our media table fixup gets applied */
memcpy(ee_data + 16, ee_data, 8);
} #endif #ifdef CONFIG_GSC /* Check to see if we have a broken srom */ if (ee_data[0] == 0x61 && ee_data[1] == 0x10) { /* pci_vendor_id and subsystem_id are swapped */
ee_data[0] = ee_data[2];
ee_data[1] = ee_data[3];
ee_data[2] = 0x61;
ee_data[3] = 0x10;
/* HSC-PCI boards need to be byte-swaped and shifted * up 1 word. This shift needs to happen at the end * of the MAC first because of the 2 byte overlap.
*/ for (i = 4; i >= 0; i -= 2) {
ee_data[17 + i + 3] = ee_data[17 + i];
ee_data[16 + i + 5] = ee_data[16 + i];
}
} #endif
for (i = 0; i < 6; i ++) {
addr[i] = ee_data[i + sa_offset];
sum += ee_data[i + sa_offset];
}
eth_hw_addr_set(dev, addr);
} /* Lite-On boards have the address byte-swapped. */ if ((dev->dev_addr[0] == 0xA0 ||
dev->dev_addr[0] == 0xC0 ||
dev->dev_addr[0] == 0x02) &&
dev->dev_addr[1] == 0x00) { for (i = 0; i < 6; i+=2) {
addr[i] = dev->dev_addr[i+1];
addr[i+1] = dev->dev_addr[i];
}
eth_hw_addr_set(dev, addr);
}
/* On the Zynx 315 Etherarray and other multiport boards only the first Tulip has an EEPROM. On Sparc systems the mac address is held in the OBP property "local-mac-address". The addresses of the subsequent ports are derived from the first. Many PCI BIOSes also incorrectly report the IRQ line, so we correct
that here as well. */ if (sum == 0 || sum == 6*0xff) { #ifdefined(CONFIG_SPARC) struct device_node *dp = pci_device_to_OF_node(pdev); constunsignedchar *addr2; int len; #endif
eeprom_missing = 1; for (i = 0; i < 5; i++)
addr[i] = last_phys_addr[i];
addr[i] = last_phys_addr[i] + 1;
eth_hw_addr_set(dev, addr); #ifdefined(CONFIG_SPARC)
addr2 = of_get_property(dp, "local-mac-address", &len); if (addr2 && len == ETH_ALEN)
eth_hw_addr_set(dev, addr2); #endif #ifdefined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ if (last_irq)
irq = last_irq; #endif
}
for (i = 0; i < 6; i++)
last_phys_addr[i] = dev->dev_addr[i]; #ifdefined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
last_irq = irq; #endif
/* The lower four bits are the media type. */ if (board_idx >= 0 && board_idx < MAX_UNITS) { if (options[board_idx] & MEDIA_MASK)
tp->default_port = options[board_idx] & MEDIA_MASK; if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
tp->full_duplex = 1; if (mtu[board_idx] > 0)
dev->mtu = mtu[board_idx];
} if (dev->mem_start & MEDIA_MASK)
tp->default_port = dev->mem_start & MEDIA_MASK; if (tp->default_port) {
pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
board_idx, medianame[tp->default_port & MEDIA_MASK]);
tp->medialock = 1; if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
tp->full_duplex = 1;
} if (tp->full_duplex)
tp->full_duplex_lock = 1;
if ((tp->flags & ALWAYS_CHECK_MII) ||
(tp->mtable && tp->mtable->has_mii) ||
( ! tp->mtable && (tp->flags & HAS_MII))) { if (tp->mtable && tp->mtable->has_mii) { for (i = 0; i < tp->mtable->leafcount; i++) if (tp->mtable->mleaf[i].media == 11) {
tp->cur_index = i;
tp->saved_if_port = dev->if_port;
tulip_select_media(dev, 2);
dev->if_port = tp->saved_if_port; break;
}
}
/* Find the connected MII xcvrs. Doing this in open() would allow detecting external xcvrs
later, but takes much time. */
tulip_find_mii (dev, board_idx);
}
/* The Tulip-specific entries in the device structure. */
dev->netdev_ops = &tulip_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_TULIP_NAPI
netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16); #endif
dev->ethtool_ops = &ops;
i = register_netdev(dev); if (i) return i;
pci_set_drvdata(pdev, dev);
dev_info(&dev->dev, #ifdef CONFIG_TULIP_MMIO "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n", #else "%s rev %d at Port %#llx,%s %pM, IRQ %d\n", #endif
chip_name, pdev->revision,
(unsignedlonglong)pci_resource_start(pdev, TULIP_BAR),
eeprom_missing ? " EEPROM not present," : "",
dev->dev_addr, irq);
#ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing.
*/
/* disable_irq here is not very nice, but with the lockless
interrupt handler we have no other choice. */
disable_irq(irq);
tulip_interrupt (irq, dev);
enable_irq(irq);
} #endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.