// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation * Copyright (c) 2006, 2007 Maciej W. Rozycki * * This driver is designed for the Broadcom SiByte SOC built-in * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp. * * Updated to the driver model and the PHY abstraction layer * by Maciej W. Rozycki.
*/
/* * This stuff is used to identify the channel and the registers * associated with it.
*/ struct sbmac_softc *sbdma_eth; /* back pointer to associated
MAC */ int sbdma_channel; /* channel number */ int sbdma_txdir; /* direction (1=transmit) */ int sbdma_maxdescr; /* total # of descriptors
in ring */ #ifdef CONFIG_SBMAC_COALESCE int sbdma_int_pktcnt; /* # descriptors rx/tx
before interrupt */ int sbdma_int_timeout; /* # usec rx/tx interrupt */ #endif void __iomem *sbdma_config0; /* DMA config register 0 */ void __iomem *sbdma_config1; /* DMA config register 1 */ void __iomem *sbdma_dscrbase; /* descriptor base address */ void __iomem *sbdma_dscrcnt; /* descriptor count register */ void __iomem *sbdma_curdscr; /* current descriptor
address */ void __iomem *sbdma_oodpktlost; /* pkt drop (rx only) */
/* * This stuff is for maintenance of the ring
*/ void *sbdma_dscrtable_unaligned; struct sbdmadscr *sbdma_dscrtable; /* base of descriptor table */ struct sbdmadscr *sbdma_dscrtable_end; /* end of descriptor table */ struct sk_buff **sbdma_ctxtable; /* context table, one
per descr */
dma_addr_t sbdma_dscrtable_phys; /* and also the phys addr */ struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ struct sbdmadscr *sbdma_remptr; /* next dscr for sw
to remove */
};
enum sbmac_speed sbm_speed; /* current speed */ enum sbmac_duplex sbm_duplex; /* current duplex */ enum sbmac_fc sbm_fc; /* cur. flow control setting */ int sbm_pause; /* current pause setting */ int sbm_link; /* current link state */
unsignedchar sbm_hwaddr[ETH_ALEN];
struct sbmacdma sbm_txdma; /* only channel 0 for now */ struct sbmacdma sbm_rxdma; int rx_hw_checksum; int sbe_idx;
};
/********************************************************************** * SBMAC_MII_SYNC(sbm_mdio) * * Synchronize with the MII - send a pattern of bits to the MII * that will guarantee that it is ready to accept a command. * * Input parameters: * sbm_mdio - address of the MAC's MDIO register * * Return value: * nothing
********************************************************************* */
static void sbmac_mii_sync(void __iomem *sbm_mdio)
{
	int cnt;
	uint64_t bits;
	int mac_mdio_genc;

	/*
	 * NOTE(review): this copy of the function was truncated after the
	 * local declarations; the body below is restored per the upstream
	 * SiByte driver -- confirm against the canonical source.
	 */

	/* Preserve the GENC bit so it is not clobbered on writeback. */
	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	/* Drive MDIO high while clocking: the MII preamble/sync pattern. */
	bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;

	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);

	/* 32 clock pulses guarantee the PHY is ready for a command. */
	for (cnt = 0; cnt < 32; cnt++) {
		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
	}
}
/********************************************************************** * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt) * * Send some bits to the MII. The bits to be sent are right- * justified in the 'data' parameter. * * Input parameters: * sbm_mdio - address of the MAC's MDIO register * data - data to send * bitcnt - number of bits to send
********************************************************************* */
static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
			       int bitcnt)
{
	int i;
	uint64_t bits;
	unsigned int curmask;
	int mac_mdio_genc;

	/*
	 * NOTE(review): this copy used 'bits', 'curmask' and 'mac_mdio_genc'
	 * without ever initializing them (undefined behavior); the missing
	 * initialization lines are restored per the upstream SiByte driver.
	 */

	/* Preserve the GENC bit so it is not clobbered on writeback. */
	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	/* Drive the MDIO pin as an output while we shift bits out. */
	bits = M_MAC_MDIO_DIR_OUTPUT;
	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);

	/* Bits go out MSB first: start the mask at the highest bit. */
	curmask = 1 << (bitcnt - 1);

	for (i = 0; i < bitcnt; i++) {
		if (data & curmask)
			bits |= M_MAC_MDIO_OUT;
		else
			bits &= ~M_MAC_MDIO_OUT;
		/* Present data, clock high, clock low: one MDC bit cell. */
		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
		curmask >>= 1;
	}
}
/********************************************************************** * SBMAC_MII_READ(bus, phyaddr, regidx) * Read a PHY register. * * Input parameters: * bus - MDIO bus handle * phyaddr - PHY's address * regnum - index of register to read * * Return value: * value read, or 0xffff if an error occurred.
********************************************************************* */
staticint sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{ struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; void __iomem *sbm_mdio = sc->sbm_mdio; int idx; int error; int regval; int mac_mdio_genc;
/* * Synchronize ourselves so that the PHY knows the next * thing coming down is a command
*/
sbmac_mii_sync(sbm_mdio);
/* * Send the data to the PHY. The sequence is * a "start" command (2 bits) * a "read" command (2 bits) * the PHY addr (5 bits) * the register index (5 bits)
*/
sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
sbmac_mii_senddata(sbm_mdio, regidx, 5);
/* * Switch the port around without a clock transition.
*/
__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
/* * Send out a clock pulse to signal we want the status
*/
__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
sbm_mdio);
__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
/* * If an error occurred, the PHY will signal '1' back
*/
error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;
/* * Issue an 'idle' clock pulse, but keep the direction * the same.
*/
__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
sbm_mdio);
__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
regval = 0;
for (idx = 0; idx < 16; idx++) {
regval <<= 1;
if (error == 0) { if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
regval |= 1;
}
/* Switch back to output */
__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
if (error == 0) return regval; return 0xffff;
}
/********************************************************************** * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) * * Write a value to a PHY register. * * Input parameters: * bus - MDIO bus handle * phyaddr - PHY to use * regidx - register within the PHY * regval - data to write to register * * Return value: * 0 for success
********************************************************************* */
static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
			   u16 regval)
{
	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
	void __iomem *sbm_mdio = sc->sbm_mdio;
	int mac_mdio_genc;

	/*
	 * NOTE(review): this copy of the function was truncated after the
	 * local declarations; the body below is restored per the upstream
	 * SiByte driver -- confirm against the canonical source.
	 */

	/* Get the PHY ready to accept a command. */
	sbmac_mii_sync(sbm_mdio);

	/*
	 * Write frame: start (2), write op (2), PHY address (5),
	 * register index (5), turnaround/ack (2), then 16 data bits.
	 */
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2);
	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
	sbmac_mii_senddata(sbm_mdio, regidx, 5);
	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2);
	sbmac_mii_senddata(sbm_mdio, regval, 16);

	/* Preserve the GENC bit and leave the port driving output. */
	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;

	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);

	return 0;
}
/********************************************************************** * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) * * Initialize a DMA channel context. Since there are potentially * eight DMA channels per MAC, it's nice to do this in a standard * way. * * Input parameters: * d - struct sbmacdma (DMA channel context) * s - struct sbmac_softc (pointer to a MAC) * chan - channel number (0..1 right now) * txrx - Identifies DMA_TX or DMA_RX for channel direction * maxdescr - number of descriptors * * Return value: * nothing
********************************************************************* */
staticvoid sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, int txrx, int maxdescr)
{ #ifdef CONFIG_SBMAC_COALESCE int int_pktcnt, int_timeout; #endif
/* * Save away interesting stuff in the structure
*/
/* * The descriptor table must be aligned to at least 16 bytes or the * MAC will corrupt it.
*/
d->sbdma_dscrtable = (struct sbdmadscr *)
ALIGN((unsignedlong)d->sbdma_dscrtable_unaligned, sizeof(*d->sbdma_dscrtable));
/********************************************************************** * SBDMA_CHANNEL_START(d) * * Initialize the hardware registers for a DMA channel. * * Input parameters: * d - DMA channel to init (context must be previously init'd * rxtx - DMA_RX or DMA_TX depending on what type of channel * * Return value: * nothing
********************************************************************* */
staticvoid sbdma_channel_start(struct sbmacdma *d, int rxtx)
{ /* * Turn on the DMA channel
*/
/********************************************************************** * SBDMA_ADD_RCVBUFFER(d,sb) * * Add a buffer to the specified DMA channel. For receive channels, * this queues a buffer for inbound packets. * * Input parameters: * sc - softc structure * d - DMA channel descriptor * sb - sk_buff to add, or NULL if we should allocate one * * Return value: * 0 if buffer could not be added (ring is full) * 1 if buffer added successfully
********************************************************************* */
/* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full
*/
if (nextdsc == d->sbdma_remptr) { return -ENOSPC;
}
/* * Allocate a sk_buff if we don't already have one. * If we do have an sk_buff, reset it so that it's empty. * * Note: sk_buffs don't seem to be guaranteed to have any sort * of alignment when they are allocated. Therefore, allocate enough * extra space to make sure that: * * 1. the data does not start in the middle of a cache line. * 2. The data does not end in the middle of a cache line * 3. The buffer can be aligned such that the IP addresses are * naturally aligned. * * Remember, the SOCs MAC writes whole cache lines at a time, * without reading the old contents first. So, if the sk_buff's * data portion starts in the middle of a cache line, the SOC * DMA will trash the beginning (and ending) portions.
*/
sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
} else {
sb_new = sb; /* * nothing special to reinit buffer, it's already aligned * and sb->data already points to a good place.
*/
}
/* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full
*/
if (nextdsc == d->sbdma_remptr) { return -ENOSPC;
}
/* * Under Linux, it's not necessary to copy/coalesce buffers * like it is on NetBSD. We think they're all contiguous, * but that may not be true for GBE.
*/
length = sb->len;
/* * fill in the descriptor. Note that the number of cache * blocks in the descriptor is the number of blocks * *spanned*, so we need to add in the offset (if any) * while doing the calculation.
*/
/********************************************************************** * SBDMA_RX_PROCESS(sc,d,work_to_do,poll) * * Process "completed" receive buffers on the specified DMA channel. * * Input parameters: * sc - softc structure * d - DMA channel context * work_to_do - no. of packets to process before enabling interrupt * again (for NAPI) * poll - 1: using polling (for NAPI) * * Return value: * nothing
********************************************************************* */
staticint sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, int work_to_do, int poll)
{ struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; struct sbdmadscr *dsc; struct sk_buff *sb; int len; int work_done = 0; int dropped = 0;
prefetch(d);
again: /* Check if the HW dropped any frames */
dev->stats.rx_fifo_errors
+= __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
while (work_to_do-- > 0) { /* * figure out where we are (as an index) and where * the hardware is (also as an index) * * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
*/
/* * If they're the same, that means we've processed all * of the descriptors up to (but not including) the one that * the hardware is working on right now.
*/
if (curidx == hwidx) goto done;
/* * Otherwise, get the packet's sk_buff ptr back
*/
/* * Check packet status. If good, process it. * If not, silently drop it and put it back on the * receive ring.
*/
if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
/* * Add a new buffer to replace the old one. If we fail * to allocate a buffer, we're going to drop this * packet and put it right back on the receive ring.
*/
if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
-ENOBUFS)) {
dev->stats.rx_dropped++; /* Re-add old buffer */
sbdma_add_rcvbuffer(sc, d, sb); /* No point in continuing at the moment */
printk(KERN_ERR "dropped packet (1)\n");
d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done;
} else { /* * Set length into the packet
*/
skb_put(sb,len);
/* * Buffer has been replaced on the * receive ring. Pass the buffer to * the kernel
*/
sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev); /* Check hw IPv4/TCP checksum if supported */ if (sc->rx_hw_checksum == ENABLE) { if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
!((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
sb->ip_summed = CHECKSUM_UNNECESSARY; /* don't need to set sb->csum */
} else {
skb_checksum_none_assert(sb);
}
}
prefetch(sb->data);
prefetch((constvoid *)(((char *)sb->data)+32)); if (poll)
dropped = netif_receive_skb(sb); else
dropped = netif_rx(sb);
if (dropped == NET_RX_DROP) {
dev->stats.rx_dropped++;
d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done;
} else {
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
}
}
} else { /* * Packet was mangled somehow. Just drop it and * put it back on the receive ring.
*/
dev->stats.rx_errors++;
sbdma_add_rcvbuffer(sc, d, sb);
}
/* * .. and advance to the next buffer.
*/
d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
work_done++;
} if (!poll) {
work_to_do = 32; goto again; /* collect fifo drop statistics again */
}
done: return work_done;
}
/********************************************************************** * SBDMA_TX_PROCESS(sc,d) * * Process "completed" transmit buffers on the specified DMA channel. * This is normally called within the interrupt service routine. * Note that this isn't really ideal for priority channels, since * it processes all of the packets on a given channel before * returning. * * Input parameters: * sc - softc structure * d - DMA channel context * poll - 1: using polling (for NAPI) * * Return value: * nothing
********************************************************************* */
staticvoid sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, int poll)
{ struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; struct sbdmadscr *dsc; struct sk_buff *sb; unsignedlong flags; int packets_handled = 0;
spin_lock_irqsave(&(sc->sbm_lock), flags);
if (d->sbdma_remptr == d->sbdma_addptr) goto end_unlock;
for (;;) { /* * figure out where we are (as an index) and where * the hardware is (also as an index) * * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
*/
curidx = d->sbdma_remptr - d->sbdma_dscrtable;
/* * If they're the same, that means we've processed all * of the descriptors up to (but not including) the one that * the hardware is working on right now.
*/
if (curidx == hwidx) break;
/* * Otherwise, get the packet's sk_buff ptr back
*/
/********************************************************************** * SBMAC_INITCTX(s) * * Initialize an Ethernet context structure - this is called * once per MAC on the 1250. Memory is allocated here, so don't * call it again from inside the ioctl routines that bring the * interface up/down * * Input parameters: * s - sbmac context structure * * Return value: * 0
********************************************************************* */
/* * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above * Use a larger RD_THRSH for gigabit
*/ if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
th_value = 28; else
th_value = 64;
fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
((s->sbm_speed == sbmac_speed_1000)
? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
V_MAC_TX_RL_THRSH(4) |
V_MAC_RX_PL_THRSH(4) |
V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
V_MAC_RX_RL_THRSH(8) |
0;
/********************************************************************** * SBMAC_SET_CHANNEL_STATE(state) * * Set the channel's state ON or OFF * * Input parameters: * state - new state * * Return value: * old state
********************************************************************* */ staticenum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, enum sbmac_state state)
{ enum sbmac_state oldstate = sc->sbm_state;
/* Hard code the off set to 15 for now */
reg = __raw_readq(sc->sbm_rxfilter);
reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
__raw_writeq(reg, sc->sbm_rxfilter);
/********************************************************************** * SBMAC_ADDR2REG(ptr) * * Convert six bytes into the 64-bit register value that * we typically write into the SBMAC's address/mcast registers * * Input parameters: * ptr - pointer to 6 bytes * * Return value: * register value
********************************************************************* */
/********************************************************************** * SBMAC_SET_SPEED(s,speed) * * Configure LAN speed for the specified MAC. * Warning: must be called when MAC is off! * * Input parameters: * s - sbmac structure * speed - speed to set MAC to (see enum sbmac_speed) * * Return value: * 1 if successful * 0 indicates invalid parameters
********************************************************************* */
/********************************************************************** * SBMAC_SET_DUPLEX(s,duplex,fc) * * Set Ethernet duplex and flow control options for this MAC * Warning: must be called when MAC is off! * * Input parameters: * s - sbmac structure * duplex - duplex setting (see enum sbmac_duplex) * fc - flow control setting (see enum sbmac_fc) * * Return value: * 1 if ok * 0 if an invalid parameter combination was specified
********************************************************************* */
switch (duplex) { case sbmac_duplex_half: switch (fc) { case sbmac_fc_disabled:
cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; break;
case sbmac_fc_collision:
cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; break;
case sbmac_fc_carrier:
cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; break;
case sbmac_fc_frame: /* not valid in half duplex */ default: /* invalid selection */ return 0;
} break;
case sbmac_duplex_full: switch (fc) { case sbmac_fc_disabled:
cfg |= V_MAC_FC_CMD_DISABLED; break;
case sbmac_fc_frame:
cfg |= V_MAC_FC_CMD_ENABLED; break;
case sbmac_fc_collision: /* not valid in full duplex */ case sbmac_fc_carrier: /* not valid in full duplex */ default: return 0;
} break; default: return 0;
}
if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { if (napi_schedule_prep(&sc->napi)) {
__raw_writeq(0, sc->sbm_imr);
__napi_schedule(&sc->napi); /* Depend on the exit from poll to reenable intr */
} else { /* may leave some packets behind */
sbdma_rx_process(sc,&(sc->sbm_rxdma),
SBMAC_MAX_RXDESCR * 2, 0);
}
} return IRQ_RETVAL(handled);
}
/********************************************************************** * SBMAC_START_TX(skb,dev) * * Start output on the specified interface. Basically, we * queue as many buffers as we can until the ring fills up, or * we run off the end of the queue, whichever comes first. * * Input parameters: * * * Return value: * nothing
********************************************************************* */
/*
 * Queue one skb on the transmit DMA ring.  Returns NETDEV_TX_OK on
 * success; if the ring is full, stops the queue and returns
 * NETDEV_TX_BUSY so the stack will retry the same skb later.
 *
 * Fixed in review: the fused token 'unsignedlong' (a compile error in
 * this copy) is corrected to 'unsigned long'.
 */
static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct sbmac_softc *sc = netdev_priv(dev);
	unsigned long flags;

	/* lock eth irq */
	spin_lock_irqsave(&sc->sbm_lock, flags);

	/*
	 * Put the buffer on the transmit ring.  If we
	 * don't have room, stop the queue.
	 */
	if (sbdma_add_txbuffer(&sc->sbm_txdma, skb)) {
		/* XXX save skb that we could not send */
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&sc->sbm_lock, flags);

		return NETDEV_TX_BUSY;
	}

	spin_unlock_irqrestore(&sc->sbm_lock, flags);

	return NETDEV_TX_OK;
}
/********************************************************************** * SBMAC_SETMULTI(sc) * * Reprogram the multicast table into the hardware, given * the list of multicasts associated with the interface * structure. * * Input parameters: * sc - softc * * Return value: * nothing
********************************************************************* */
/* * Clear out entire multicast table. We do this by nuking * the entire hash table and all the direct matches except * the first one, which is used for our station address
*/
for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
__raw_writeq(0, port);
}
for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
__raw_writeq(0, port);
}
/* * Clear the filter to say we don't want any multicasts.
*/
if (dev->flags & IFF_ALLMULTI) { /* * Enable ALL multicasts. Do this by inverting the * multicast enable bit.
*/
reg = __raw_readq(sc->sbm_rxfilter);
reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
__raw_writeq(reg, sc->sbm_rxfilter); return;
}
/* * Progam new multicast entries. For now, only use the * perfect filter. In the future we'll need to use the * hash filter if the perfect filter overflows
*/
/* XXX only using perfect filter for now, need to use hash
* XXX if the table overflows */
idx = 1; /* skip station address */
netdev_for_each_mc_addr(ha, dev) { if (idx == MAC_ADDR_COUNT) break;
reg = sbmac_addr2reg(ha->addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
}
/* * Enable the "accept multicast bits" if we programmed at least one * multicast.
*/
err = register_netdev(dev); if (err) {
printk(KERN_ERR "%s.%d: unable to register netdev\n",
sbmac_string, idx); goto unreg_mdio;
}
pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
if (sc->rx_hw_checksum == ENABLE)
pr_info("%s: enabling TCP rcv checksum\n", dev->name);
/* * Display Ethernet address (this is called during the config * process so we need to finish off the config message that * was being displayed)
*/
pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
dev->name, base, eaddr);
phy_dev = phy_find_first(sc->mii_bus); if (!phy_dev) {
printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENXIO;
}
phy_dev = phy_connect(dev, dev_name(&phy_dev->mdio.dev),
&sbmac_mii_poll, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); return PTR_ERR(phy_dev);
}
/* Remove any features not supported by the controller */
phy_set_max_speed(phy_dev, SPEED_1000);
phy_support_asym_pause(phy_dev);
if (!phy_dev->link) { if (link_chg) {
sc->sbm_link = phy_dev->link;
sc->sbm_speed = sbmac_speed_none;
sc->sbm_duplex = sbmac_duplex_none;
sc->sbm_fc = sbmac_fc_disabled;
sc->sbm_pause = -1;
pr_info("%s: link unavailable\n", dev->name);
} return;
}
if (phy_dev->duplex == DUPLEX_FULL) { if (phy_dev->pause)
fc = sbmac_fc_frame; else
fc = sbmac_fc_disabled;
} else
fc = sbmac_fc_collision;
fc_chg = (sc->sbm_fc != fc);
res = platform_get_resource(pldev, IORESOURCE_MEM, 0); if (!res) {
printk(KERN_ERR "%s: failed to get resource\n",
dev_name(&pldev->dev));
err = -EINVAL; goto out_out;
}
sbm_base = ioremap(res->start, resource_size(res)); if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
dev_name(&pldev->dev));
err = -ENOMEM; goto out_out;
}
/* * The R_MAC_ETHERNET_ADDR register will be set to some nonzero * value for us by the firmware if we're going to use this MAC. * If we find a zero, skip this MAC.
*/
sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev),
sbmac_orig_hwaddr ? "" : "not ", (longlong)res->start); if (sbmac_orig_hwaddr == 0) {
err = 0; goto out_unmap;
}
/* * Okay, cool. Initialize this MAC.
*/
dev = alloc_etherdev(sizeof(struct sbmac_softc)); if (!dev) {
err = -ENOMEM; goto out_unmap;
}
The information on this web page was compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.