// SPDX-License-Identifier: GPL-2.0-or-later /* drivers/net/ethernet/freescale/gianfar.c * * Gianfar Ethernet Driver * This driver is designed for the non-CPM ethernet controllers * on the 85xx and 83xx family of integrated processors * Based on 8260_io/fcc_enet.c * * Author: Andy Fleming * Maintainer: Kumar Gala * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> * * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. * Copyright 2007 MontaVista Software, Inc. * * Gianfar: AKA Lambda Draconis, "Dragon" * RA 11 31 24.2 * Dec +69 19 52 * V 3.84 * B-V +1.62 * * Theory of operation * * The driver is initialized through of_device. Configuration information * is therefore conveyed through an OF-style device tree. * * The Gianfar Ethernet Controller uses a ring of buffer * descriptors. The beginning is indicated by a register * pointing to the physical address of the start of the ring. * The end is determined by a "wrap" bit being set in the * last descriptor of the ring. * * When a packet is received, the RXF bit in the * IEVENT register is set, triggering an interrupt when the * corresponding bit in the IMASK register is also set (if * interrupt coalescing is active, then the interrupt may not * happen immediately, but will wait until either a set number * of frames or amount of time have passed). In NAPI, the * interrupt handler will signal there is work to be done, and * exit. This method will start at the last known empty * descriptor, and process every subsequent descriptor until there * are none left with data (NAPI will stop after a set number of * packets to give time to other tasks, but will eventually * process all the packets). The data arrives inside a * pre-allocated skb, and so after the skb is passed up to the * stack, a new skb must be allocated, and the address field in * the buffer descriptor must be updated to indicate this new * skb. 
* * When the kernel requests that a packet be transmitted, the * driver starts where it left off last time, and points the * descriptor at the buffer which was passed in. The driver * then informs the DMA engine that there are packets ready to * be transmitted. Once the controller is finished transmitting * the packet, an interrupt may be triggered (under the same * conditions as for reception, but depending on the TXF bit). * The driver then cleans up the buffer.
*/
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; /* Program the RIR0 reg with the required distribution */
gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0);
}
baddr = ®s->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
gfar_write(baddr + i, 0); if (likely(priv->tx_queue[i]->txcoalescing))
gfar_write(baddr + i, priv->tx_queue[i]->txic);
}
baddr = ®s->rxic0;
for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
gfar_write(baddr + i, 0); if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
}
} else { /* Backward compatible case -- even if we enable * multiple queues, there's only single reg to program
*/
gfar_write(®s->txic, 0); if (likely(priv->tx_queue[0]->txcoalescing))
gfar_write(®s->txic, priv->tx_queue[0]->txic);
gfar_write(®s->rxic, 0); if (unlikely(priv->rx_queue[0]->rxcoalescing))
gfar_write(®s->rxic, priv->rx_queue[0]->rxic);
}
}
/* Set the appropriate hash bit for the given addr */ /* The algorithm works like so: * 1) Take the Destination Address (ie the multicast address), and * do a CRC on it (little endian), and reverse the bits of the * result. * 2) Use the 8 most significant bits as a hash into a 256-entry * table. The table is controlled through 8 32-bit registers: * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is * gaddr7. This means that the 3 most significant bits in the * hash index which gaddr register to use, and the 5 other bits * indicate which bit (assuming an IBM numbering scheme, which * for PowerPC (tm) is usually the case) in the register holds * the entry.
*/ staticvoid gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval; struct gfar_private *priv = netdev_priv(dev);
u32 result = ether_crc(ETH_ALEN, addr); int width = priv->hash_width;
u8 whichbit = (result >> (32 - width)) & 0x1f;
u8 whichreg = result >> (32 - width + 5);
u32 value = (1 << (31-whichbit));
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	/* Each MACnADDR pair occupies two consecutive 32-bit registers */
	macptr += num * 2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr + 1, tempval);
}
/* ndo_set_mac_address hook: validate and store the new address via the
 * generic helper, then mirror it into the controller's primary
 * station-address register pair (pair 0).
 * Returns 0 on success or the error from eth_mac_addr() (e.g. -EADDRNOTAVAIL
 * for an invalid address).
 */
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
staticvoid gfar_ints_disable(struct gfar_private *priv)
{ int i; for (i = 0; i < priv->num_grps; i++) { struct gfar __iomem *regs = priv->gfargrp[i].regs; /* Clear IEVENT */
gfar_write(®s->ievent, IEVENT_INIT_CLEAR);
/* Unmask the interrupts this driver handles on every interrupt group,
 * including the RMON counter-overflow sources when carrier-sense
 * statistics are in use.
 */
static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask,
			   IMASK_DEFAULT | priv->rmon_overflow.imask);
	}
}
staticint gfar_alloc_tx_queues(struct gfar_private *priv)
{ int i;
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
GFP_KERNEL); if (!priv->tx_queue[i]) return -ENOMEM;
staticint gfar_alloc_rx_queues(struct gfar_private *priv)
{ int i;
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
GFP_KERNEL); if (!priv->rx_queue[i]) return -ENOMEM;
/* If we aren't the FEC we have multiple interrupts */ if (model && strcasecmp(model, "FEC")) {
gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); if (!gfar_irq(grp, TX)->irq ||
!gfar_irq(grp, RX)->irq ||
!gfar_irq(grp, ER)->irq) return -EINVAL;
}
grp->priv = priv;
spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
} else {
grp->rx_bit_map = 0xFF;
grp->tx_bit_map = 0xFF;
}
/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses * right to left, so we need to revert the 8 bits to get the q index
*/
grp->rx_bit_map = bitrev8(grp->rx_bit_map);
grp->tx_bit_map = bitrev8(grp->tx_bit_map);
/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, * also assign queues to groups
*/
for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { if (!grp->rx_queue)
grp->rx_queue = priv->rx_queue[i];
grp->num_rx_queues++;
grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
priv->rx_queue[i]->grp = grp;
}
/* Reads the controller's registers to determine what interface * connects it to the PHY.
*/ static phy_interface_t gfar_get_interface(struct net_device *dev)
{ struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 ecntrl;
ecntrl = gfar_read(®s->ecntrl);
if (ecntrl & ECNTRL_SGMII_MODE) return PHY_INTERFACE_MODE_SGMII;
if (ecntrl & ECNTRL_TBI_MODE) { if (ecntrl & ECNTRL_REDUCED_MODE) return PHY_INTERFACE_MODE_RTBI; else return PHY_INTERFACE_MODE_TBI;
}
if (ecntrl & ECNTRL_REDUCED_MODE) { if (ecntrl & ECNTRL_REDUCED_MII_MODE) { return PHY_INTERFACE_MODE_RMII;
} else {
phy_interface_t interface = priv->interface;
/* This isn't autodetected right now, so it must * be set by the device tree or platform code.
*/ if (interface == PHY_INTERFACE_MODE_RGMII_ID) return PHY_INTERFACE_MODE_RGMII_ID;
return PHY_INTERFACE_MODE_RGMII;
}
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) return PHY_INTERFACE_MODE_GMII;
if (of_device_is_compatible(np, "fsl,etsec2"))
mode = MQ_MG_MODE; else
mode = SQ_SG_MODE;
if (mode == SQ_SG_MODE) {
num_tx_qs = 1;
num_rx_qs = 1;
} else { /* MQ_MG_MODE */ /* get the actual number of supported groups */ unsignedint num_grps;
num_grps = device_get_named_child_node_count(&ofdev->dev, "queue-group"); if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
num_grps);
pr_err("Cannot do alloc_etherdev, aborting\n"); return -EINVAL;
}
num_tx_qs = num_grps; /* one txq per int group */
num_rx_qs = num_grps; /* one rxq per int group */
}
if (num_tx_qs > MAX_TX_QS) {
pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
num_tx_qs, MAX_TX_QS);
pr_err("Cannot do alloc_etherdev, aborting\n"); return -EINVAL;
}
if (num_rx_qs > MAX_RX_QS) {
pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
num_rx_qs, MAX_RX_QS);
pr_err("Cannot do alloc_etherdev, aborting\n"); return -EINVAL;
}
*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
dev = *pdev; if (NULL == dev) return -ENOMEM;
err = gfar_alloc_tx_queues(priv); if (err) goto tx_alloc_failed;
err = gfar_alloc_rx_queues(priv); if (err) goto rx_alloc_failed;
err = of_property_read_string(np, "model", &model); if (err) {
pr_err("Device model property missing, aborting\n"); goto rx_alloc_failed;
}
/* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
priv->rx_list.count = 0;
mutex_init(&priv->rx_queue_access);
for (i = 0; i < MAXGROUPS; i++)
priv->gfargrp[i].regs = NULL;
/* Parse and initialize group specific information */ if (priv->mode == MQ_MG_MODE) {
for_each_available_child_of_node(np, child) { if (!of_node_name_eq(child, "queue-group")) continue;
/* Use PHY connection type from the DT node if one is specified there. * rgmii-id really needs to be specified. Other types can be * detected by hardware
*/
err = of_get_phy_mode(np, &interface); if (!err)
priv->interface = interface; else
priv->interface = gfar_get_interface(dev);
if (of_property_read_bool(np, "fsl,magic-packet"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
if (of_property_read_bool(np, "fsl,wake-on-filer"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
/* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node.
*/ if (!priv->phy_node && of_phy_is_fixed_link(np)) {
err = of_phy_register_fixed_link(np); if (err) goto err_grp_init;
priv->phy_node = of_node_get(np);
}
/* Find the TBI PHY. If it's not there, we don't support SGMII */
priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
/* Normaly TSEC should not hang on GRS commands, so we should * actually wait for IEVENT_GRSC flag.
*/ if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) return 0;
/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are * the same as bits 23-30, the eTSEC Rx is assumed to be idle * and the Rx can be safely reset.
*/
res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
res &= 0x7f807f80; if ((res & 0xffff) == (res >> 16)) return 1;
return 0;
}
/* Halt the receive and transmit queues */ staticvoid gfar_halt_nodisable(struct gfar_private *priv)
{ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval; unsignedint timeout; int stopped;
gfar_ints_disable(priv);
if (gfar_is_dma_stopped(priv)) return;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(®s->dmactrl);
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
gfar_write(®s->dmactrl, tempval);
/* If there are any tx skbs or rx skbs still around, free them. * Then free tx_skbuff and rx_skbuff
*/ staticvoid free_skb_resources(struct gfar_private *priv)
{ struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; int i;
/* Go through all the buffer descriptors and free their data buffers */ for (i = 0; i < priv->num_tx_queues; i++) { struct netdev_queue *txq;
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(®s->dmactrl);
tempval |= DMACTRL_INIT_SETTINGS;
gfar_write(®s->dmactrl, tempval);
/* Make sure we aren't stopped */
tempval = gfar_read(®s->dmactrl);
tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
gfar_write(®s->dmactrl, tempval);
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs; /* Clear THLT/RHLT, so that the DMA starts polling now */
gfar_write(®s->tstat, priv->gfargrp[i].tstat);
gfar_write(®s->rstat, priv->gfargrp[i].rstat);
}
/* make sure next_to_clean != next_to_use after this * by leaving at least 1 unused descriptor
*/
gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
if (!priv->pause_aneg_en) { if (priv->tx_pause_en)
val |= MACCFG1_TX_FLOW; if (priv->rx_pause_en)
val |= MACCFG1_RX_FLOW;
} else {
u16 lcl_adv, rmt_adv;
u8 flowctrl; /* get link partner capabilities */
rmt_adv = 0; if (phydev->pause)
rmt_adv = LPA_PAUSE_CAP; if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW; if (flowctrl & FLOW_CTRL_RX)
val |= MACCFG1_RX_FLOW;
}
/* Turn last free buffer recording on */ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { for (i = 0; i < priv->num_rx_queues; i++) {
u32 bdp_dma;
if (netif_msg_link(priv))
phy_print_status(phydev);
}
/* Called every time the controller might need to be made * aware of new link state. The PHY code conveys this * information through variables in the phydev structure, and this * function converts those variables into the appropriate * register values, and can bring down the device if needed.
*/ staticvoid adjust_link(struct net_device *dev)
{ struct gfar_private *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev;
/* Initialize TBI PHY interface for communicating with the * SERDES lynx PHY on the chip. We communicate with this PHY * through the MDIO bus on each controller, treating it as a * "normal" PHY at the address found in the TBIPA register. We assume * that the TBIPA register is valid. Either the MDIO bus code will set * it to a value that doesn't conflict with other PHYs on the bus, or the * value doesn't matter, as there are no other PHYs on the bus.
*/ staticvoid gfar_configure_serdes(struct net_device *dev)
{ struct gfar_private *priv = netdev_priv(dev); struct phy_device *tbiphy;
if (!priv->tbi_node) {
dev_warn(&dev->dev, "error: SGMII mode requires that the " "device tree specify a tbi-handle\n"); return;
}
tbiphy = of_phy_find_device(priv->tbi_node); if (!tbiphy) {
dev_err(&dev->dev, "error: Could not get TBI device\n"); return;
}
/* If the link is already up, we must already be ok, and don't need to * configure and reset the TBI<->SerDes link. Maybe U-Boot configured * everything for us? Resetting it takes the link down and requires * several seconds for it to come back.
*/ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
put_device(&tbiphy->mdio.dev); return;
}
/* Single clk mode, mii mode off(for serdes communication) */
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
staticinlinevoid gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, int fcb_length)
{ /* If we're here, it's a IP packet with a TCP or UDP * payload. We set it to checksum, using a pseudo-header * we provide
*/
u8 flags = TXFCB_DEFAULT;
/* Tell the controller what the protocol is * And provide the already calculated phcs
*/ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
flags |= TXFCB_UDP;
fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
} else
fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
/* l3os is the distance between the start of the * frame (skb->data) and the start of the IP hdr. * l4os is the distance between the start of the * l3 hdr and the l4 hdr
*/
fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
/* eTSEC12: csum generation not supported for some fcb offsets */ staticinlinebool gfar_csum_errata_12(struct gfar_private *priv, unsignedlong fcb_addr)
{ return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
(fcb_addr % 0x20) > 0x18);
}
/* eTSEC76: csum generation for frames larger than 2500 may * cause excess delays before start of transmission
*/ staticinlinebool gfar_csum_errata_76(struct gfar_private *priv, unsignedint len)
{ return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
(len > 2500));
}
/* This is called by the kernel when a frame is ready for transmission. * It is pointed to by the dev->hard_start_xmit function pointer
*/ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_tx_q *tx_queue = NULL; struct netdev_queue *txq; struct gfar __iomem *regs = NULL; struct txfcb *fcb = NULL; struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
skb_frag_t *frag; int i, rq = 0; int do_tstamp, do_csum, do_vlan;
u32 bufaddr; unsignedint nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
/* check if time stamp should be generated */ if (unlikely(do_tstamp))
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */ if (fcb_len) { if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb); return NETDEV_TX_OK;
}
}
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
/* calculate the required number of TxBDs for this skb */ if (unlikely(do_tstamp))
nr_txbds = nr_frags + 2; else
nr_txbds = nr_frags + 1;
/* check if there is space to queue this packet */ if (nr_txbds > tx_queue->num_txbdfree) { /* no space, stop the queue */
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++; return NETDEV_TX_BUSY;
}
/* Add TxPAL between FCB and frame if required */ if (unlikely(do_tstamp)) {
skb_push(skb, GMAC_TXPAL_LEN);
memset(skb->data, 0, GMAC_TXPAL_LEN);
}
/* Add TxFCB if required */ if (fcb_len) {
fcb = gfar_add_fcb(skb);
lstatus |= BD_LFLAG(TXBD_TOE);
}
/* Set up checksumming */ if (do_csum) {
gfar_tx_checksum(skb, fcb, fcb_len);
if (unlikely(gfar_csum_errata_12(priv, (unsignedlong)fcb)) ||
unlikely(gfar_csum_errata_76(priv, skb->len))) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb); if (do_vlan || do_tstamp) { /* put back a new fcb for vlan/tstamp TOE */
fcb = gfar_add_fcb(skb);
} else { /* Tx TOE not used */
lstatus &= ~(BD_LFLAG(TXBD_TOE));
fcb = NULL;
}
}
}
/* Time stamp insertion requires one additional TxBD */ if (unlikely(do_tstamp))
txbdp_tstamp = txbdp = next_txbd(txbdp, base,
tx_queue->tx_ring_size);
if (likely(!nr_frags)) { if (likely(!do_tstamp))
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
/* Place the fragment addresses and lengths into the TxBDs */
frag = &skb_shinfo(skb)->frags[0]; for (i = 0; i < nr_frags; i++, frag++) { unsignedint size;
/* Point at the next BD, wrapping as needed */
txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
/* set the TxBD length and buffer pointer */
txbdp->bufPtr = cpu_to_be32(bufaddr);
txbdp->lstatus = cpu_to_be32(lstatus);
}
lstatus = lstatus_start;
}
/* If time stamping is requested one additional TxBD must be set up. The * first TxBD points to the FCB and must have a data length of * GMAC_FCB_LEN. The second TxBD points to the actual frame data with * the full frame length.
*/ if (unlikely(do_tstamp)) {
u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
gfar_wmb(); /* force lstatus write before tx_skbuff */
tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
/* Update the current skb pointer to the next entry we will use * (wrapping if necessary)
*/
tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
TX_RING_MOD_MASK(tx_queue->tx_ring_size);
/* We can work in parallel with gfar_clean_tx_ring(), except * when modifying num_txbdfree. Note that we didn't grab the lock * when we were reading the num_txbdfree and checking for available * space, that's because outside of this function it can only grow.
*/
spin_lock_bh(&tx_queue->txlock); /* reduce TxBD free count */
tx_queue->num_txbdfree -= (nr_txbds);
spin_unlock_bh(&tx_queue->txlock);
/* If the next BD still needs to be cleaned up, then the bds * are full. We need to tell the kernel to stop sending us stuff.
*/ if (!tx_queue->num_txbdfree) {
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++;
}
/* Tell the DMA to go go go */
gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
return NETDEV_TX_OK;
dma_map_err:
txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); if (do_tstamp)
txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); for (i = 0; i < nr_frags; i++) {
lstatus = be32_to_cpu(txbdp->lstatus); if (!(lstatus & BD_LFLAG(TXBD_READY))) break;
/* Changes the mac address if the controller is not running. */ staticint gfar_set_mac_address(struct net_device *dev)
{
gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
cpu_relax();
stop_gfar(ndev);
startup_gfar(ndev);
clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);

	reset_gfar(priv->ndev);
}
/* When time stamping, one additional TxBD must be freed. * Also, we need to dma_unmap_single() the TxPAL.
*/ if (unlikely(do_tstamp))
nr_txbds = frags + 2; else
nr_txbds = frags + 1;
/* If we freed a buffer, we can restart transmission, if necessary */ if (tx_queue->num_txbdfree &&
netif_tx_queue_stopped(txq) &&
!(test_bit(GFAR_DOWN, &priv->state)))
netif_wake_subqueue(priv->ndev, tqi);
if (unlikely(ievent & IEVENT_FGPI)) {
gfar_write(&grp->regs->ievent, IEVENT_FGPI); return IRQ_HANDLED;
}
if (likely(napi_schedule_prep(&grp->napi_rx))) {
spin_lock_irqsave(&grp->grplock, flags);
imask = gfar_read(&grp->regs->imask);
imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
gfar_write(&grp->regs->imask, imask);
spin_unlock_irqrestore(&grp->grplock, flags);
__napi_schedule(&grp->napi_rx);
} else { /* Clear IEVENT, so interrupts aren't called again * because of the packets that have already arrived.
*/
gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
}
if (likely(napi_schedule_prep(&grp->napi_tx))) {
spin_lock_irqsave(&grp->grplock, flags);
imask = gfar_read(&grp->regs->imask);
imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
gfar_write(&grp->regs->imask, imask);
spin_unlock_irqrestore(&grp->grplock, flags);
__napi_schedule(&grp->napi_tx);
} else { /* Clear IEVENT, so interrupts aren't called again * because of the packets that have already arrived.
*/
gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
}
if (likely(first)) {
skb_put(skb, size);
} else { /* the last fragments' length contains the full frame length */ if (lstatus & BD_LFLAG(RXBD_LAST))
size -= skb->len;
WARN(size < 0, "gianfar: rx fragment size underflow"); if (size < 0) returnfalse;
if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { /* reuse the free half of the page */
gfar_reuse_rx_page(rx_queue, rxb);
} else { /* page cannot be reused, unmap it */
dma_unmap_page(rx_queue->dev, rxb->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
}
/* clear rxb content */
rxb->page = NULL;
return skb;
}
/* Propagate the hardware's Rx checksum verdict into the skb: mark
 * CHECKSUM_UNNECESSARY only when both the IP and TCP/UDP checksums
 * were verified by the controller.
 */
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is [FIXME]
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
/* Trim off the FCS */
pskb_trim(skb, skb->len - ETH_FCS_LEN);
if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set.
*/ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
be16_to_cpu(fcb->flags) & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(fcb->vlctl));
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring * until the budget/quota has been reached. Returns the number * of frames handled
*/ staticint gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{ struct net_device *ndev = rx_queue->ndev; struct gfar_private *priv = netdev_priv(ndev); struct rxbd8 *bdp; int i, howmany = 0; struct sk_buff *skb = rx_queue->skb; int cleaned_cnt = gfar_rxbd_unused(rx_queue); unsignedint total_bytes = 0, total_pkts = 0;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.