// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */
#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * varies between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64
/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
/* * Coldfire does not support DMA coherent allocations, and has historically used * a band-aid with a manual flush in fec_enet_rx_queue.
*/ #ifdefined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) staticvoid *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp)
{ return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
}
staticint
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{ /* Only run for packets requiring a checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0;
status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
/* Handle the last BD specially */ if (frag == nr_frags - 1) {
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) {
estatus |= BD_ENET_TX_INT; if (unlikely(skb_shinfo(skb)->tx_flags &
SKBTX_HW_TSTAMP && fep->hwts_tx_en))
estatus |= BD_ENET_TX_TS;
}
}
if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
bdp->cbd_bufaddr = cpu_to_fec32(addr);
bdp->cbd_datlen = cpu_to_fec16(frag_len); /* Make sure the updates to rest of the descriptor are * performed before transferring ownership.
*/
wmb();
bdp->cbd_sc = cpu_to_fec16(status);
}
return bdp;
dma_mapping_error:
bdp = txq->bd.cur; for (i = 0; i < frag; i++) {
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
} return ERR_PTR(-ENOMEM);
}
entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free < MAX_SKB_FRAGS + 1) {
dev_kfree_skb_any(skb); if (net_ratelimit())
netdev_err(ndev, "NOT enough BD for SG!\n"); return NETDEV_TX_OK;
}
/* Protocol checksum off-load for TCP and UDP. */ if (fec_enet_clear_csum(skb, ndev)) {
dev_kfree_skb_any(skb); return NETDEV_TX_OK;
}
/* Fill in a Tx ring entry */
bdp = txq->bd.cur;
last_bdp = bdp;
status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
bufaddr = skb->data;
buflen = skb_headlen(skb);
index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsignedlong) bufaddr) & fep->tx_align ||
fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], skb->data, buflen);
bufaddr = txq->tx_bounce[index];
if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, buflen);
}
/* Push the data cache so the CPM does not get stale memory data. */
addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); if (dma_mapping_error(&fep->pdev->dev, addr)) {
dev_kfree_skb_any(skb); if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n"); return NETDEV_TX_OK;
}
if (nr_frags) {
last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); if (IS_ERR(last_bdp)) {
dma_unmap_single(&fep->pdev->dev, addr,
buflen, DMA_TO_DEVICE);
dev_kfree_skb_any(skb); return NETDEV_TX_OK;
}
} else {
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) {
estatus = BD_ENET_TX_INT; if (unlikely(skb_shinfo(skb)->tx_flags &
SKBTX_HW_TSTAMP && fep->hwts_tx_en))
estatus |= BD_ENET_TX_TS;
}
}
bdp->cbd_bufaddr = cpu_to_fec32(addr);
bdp->cbd_datlen = cpu_to_fec16(buflen);
index = fec_enet_get_bd_index(last_bdp, &txq->bd); /* Save skb pointer */
txq->tx_buf[index].buf_p = skb;
/* Make sure the updates to rest of the descriptor are performed before * transferring ownership.
*/
wmb();
/* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end.
*/
status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
bdp->cbd_sc = cpu_to_fec16(status);
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
skb_tx_timestamp(skb);
/* Make sure the update to bdp is performed before txq->bd.cur. */
wmb();
txq->bd.cur = bdp;
if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB)
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
/* Handle the last BD specially */ if (last_tcp)
status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); if (is_last) {
status |= BD_ENET_TX_INTR; if (fep->bufdesc_ex)
ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
}
/* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr)
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); else
bdp->cbd_sc = cpu_to_fec16(0);
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
rxq->bd.cur = rxq->bd.base;
}
for (q = 0; q < fep->num_tx_queues; q++) { /* ...and the same for transmit */
txq = fep->tx_queue[q];
bdp = txq->bd.base;
txq->bd.cur = bdp;
for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0); if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE); if (txq->tx_buf[i].buf_p)
dev_kfree_skb_any(txq->tx_buf[i].buf_p);
} elseif (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { if (bdp->cbd_bufaddr)
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
/* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself.
*/ staticvoid fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
{
u32 val;
/* * This function is called to start or restart the FEC during a link * change, transmit timeout, or to reconfigure the FEC. The network * packet processing for this device must be stopped before this call.
*/ staticvoid
fec_restart(struct net_device *ndev)
{ struct fec_enet_private *fep = netdev_priv(ndev);
u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII;
u32 ecntl = FEC_ECR_ETHEREN;
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
fec_ctrl_reset(fep, false);
/* * enet-mac reset will reset mac address registers too, * so need to reconfigure it.
*/
fec_set_hw_mac_addr(ndev);
/* Enable MII mode */ if (fep->full_duplex == DUPLEX_FULL) { /* FD enable */
writel(0x04, fep->hwp + FEC_X_CNTRL);
} else { /* No Rcv on Xmit */
rcntl |= FEC_RCR_DRT;
writel(0x0, fep->hwp + FEC_X_CNTRL);
}
/* Set MII speed */
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
#if !defined(CONFIG_M5272) if (fep->quirks & FEC_QUIRK_HAS_RACC) {
u32 val = readl(fep->hwp + FEC_RACC);
/* align IP header */
val |= FEC_RACC_SHIFT16; if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) /* set RX checksum */
val |= FEC_RACC_OPTIONS; else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
} #endif
/* * The phy interface and speed need to get configured * differently on enet-mac.
*/ if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* Enable flow control and length check */
rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL;
/* RGMII, RMII or MII */ if (phy_interface_mode_is_rgmii(fep->phy_interface))
rcntl |= FEC_RCR_RGMII; elseif (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= FEC_RCR_RMII; else
rcntl &= ~FEC_RCR_RMII;
/* 1G, 100M or 10M */ if (ndev->phydev) { if (ndev->phydev->speed == SPEED_1000)
ecntl |= FEC_ECR_SPEED; elseif (ndev->phydev->speed == SPEED_100)
rcntl &= ~FEC_RCR_10BASET; else
rcntl |= FEC_RCR_10BASET;
}
} else { #ifdef FEC_MIIGSK_ENR if (fep->quirks & FEC_QUIRK_USE_GASKET) {
u32 cfgr; /* disable the gasket and wait */
writel(0, fep->hwp + FEC_MIIGSK_ENR); while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
udelay(1);
/* * configure the gasket: * RMII, 50 MHz, no loopback, no echo * MII, 25 MHz, no loopback, no echo
*/
cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; if (ndev->phydev && ndev->phydev->speed == SPEED_10)
cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
/* We cannot expect a graceful transmit stop without link !!! */ if (fep->link) {
writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
udelay(10); if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
/* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
if (fep->bufdesc_ex) {
val = readl(fep->hwp + FEC_ECNTRL);
val |= FEC_ECR_EN1588;
writel(val, fep->hwp + FEC_ECNTRL);
staticvoid
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{ struct fec_enet_private *fep; struct xdp_frame *xdpf; struct bufdesc *bdp; unsignedshort status; struct sk_buff *skb; struct fec_enet_priv_tx_q *txq; struct netdev_queue *nq; int index = 0; int entries_free; struct page *page; int frame_len;
fep = netdev_priv(ndev);
txq = fep->tx_queue[queue_id]; /* get next bdp of dirty_tx */
nq = netdev_get_tx_queue(ndev, queue_id);
bdp = txq->dirty_tx;
/* get next bdp of dirty_tx */
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
while (bdp != READ_ONCE(txq->bd.cur)) { /* Order the load of bd.cur and cbd_sc */
rmb();
status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); if (status & BD_ENET_TX_READY) break;
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
skb = txq->tx_buf[index].buf_p; if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0); if (!skb) goto tx_buf_done;
} else { /* Tx processing cannot call any XDP (or page pool) APIs if * the "budget" is 0. Because NAPI is called with budget of * 0 (such as netpoll) indicates we may be in an IRQ context, * however, we can't use the page pool from IRQ context.
*/ if (unlikely(!budget)) break;
/* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK.
*/ if (status & BD_ENET_TX_DEF)
ndev->stats.collisions++;
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who * are to time stamp the packet, so we still need to check time * stamping enabled flag.
*/ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
fep->hwts_tx_en) && fep->bufdesc_ex) { struct skb_shared_hwtstamps shhwtstamps; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
tx_buf_done: /* Make sure the update to bdp and tx_buf are performed * before dirty_tx
*/
wmb();
txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
/* Since we have freed up a buffer, the ring is no longer full
*/ if (netif_tx_queue_stopped(nq)) {
entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free >= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
}
}
/* ERR006358: Keep the transmitter going */ if (bdp != txq->bd.cur &&
readl(txq->bd.reg_desc_active) == 0)
writel(0, txq->bd.reg_desc_active);
}
/*
 * Reap completed transmit descriptors on all TX queues.
 * Iterates queues highest-first so that AVB (priority) queues are
 * processed before the best-effort queue; @budget is forwarded to the
 * per-queue handler's XDP/page-pool path.
 */
static void fec_enet_tx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_tx_queues - 1; i >= 0; i--)
		fec_enet_tx_queue(ndev, i, budget);
}
/* During a receive, the bd_rx.cur points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, * effectively tossing the packet.
*/ staticint
fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
{ struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsignedshort status; struct sk_buff *skb;
ushort pkt_len; int pkt_received = 0; struct bufdesc_ex *ebdp = NULL; int index = 0; bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
u32 ret, xdp_result = FEC_ENET_XDP_PASS;
u32 data_start = FEC_ENET_XDP_HEADROOM; int cpu = smp_processor_id(); struct xdp_buff xdp; struct page *page;
__fec32 cbd_bufaddr;
u32 sub_len = 4;
#if !defined(CONFIG_M5272) /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of * FEC_RACC_SHIFT16 is set by default in the probe function.
*/ if (fep->quirks & FEC_QUIRK_HAS_RACC) {
data_start += 2;
sub_len += 2;
} #endif
#ifdefined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) /* * Hacky flush of all caches instead of using the DMA API for the TSO * headers.
*/
flush_cache_all(); #endif
rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
if (pkt_received >= budget) break;
pkt_received++;
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp); /* subtract 16bit shift and FCS */
xdp_prepare_buff(&xdp, page_address(page),
data_start, pkt_len - sub_len, false);
ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
xdp_result |= ret; if (ret != FEC_ENET_XDP_PASS) goto rx_processing_done;
}
/* The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up * bridging applications.
*/
skb = build_skb(page_address(page), PAGE_SIZE); if (unlikely(!skb)) {
page_pool_recycle_direct(rxq->page_pool, page);
ndev->stats.rx_dropped++;
data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
}
/* Extract the enhanced buffer descriptor */
ebdp = NULL; if (fep->bufdesc_ex)
ebdp = (struct bufdesc_ex *)bdp;
/* If this is a VLAN packet remove the VLAN Tag */ if (fep->bufdesc_ex &&
(ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
fec_enet_rx_vlan(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
/* Get receive timestamp from the skb */ if (fep->hwts_rx_en && fep->bufdesc_ex)
fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
skb_hwtstamps(skb));
if (fep->bufdesc_ex &&
(fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { /* don't check it */
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
skb_checksum_none_assert(skb);
}
}
rx_processing_done: /* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
/* Mark the buffer empty */
status |= BD_ENET_RX_EMPTY;
if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
ebdp->cbd_prot = 0;
ebdp->cbd_bdu = 0;
} /* Make sure the updates to rest of the descriptor are * performed before transferring ownership.
*/
wmb();
bdp->cbd_sc = cpu_to_fec16(status);
/* Update BD pointer to next entry */
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
/* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources.
*/
writel(0, rxq->bd.reg_desc_active);
}
rxq->bd.cur = bdp;
if (xdp_result & FEC_ENET_XDP_REDIR)
xdp_do_flush();
return pkt_received;
}
staticint fec_enet_rx(struct net_device *ndev, int budget)
{ struct fec_enet_private *fep = netdev_priv(ndev); int i, done = 0;
/* Make sure that AVB queues are processed first. */ for (i = fep->num_rx_queues - 1; i >= 0; i--)
done += fec_enet_rx_queue(ndev, i, budget - done);
/* * try to get mac address in following order: * * 1) module parameter via kernel command line in form * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
*/
iap = macaddr;
/* * 2) from device tree data
*/ if (!is_valid_ether_addr(iap)) { struct device_node *np = fep->pdev->dev.of_node; if (np) {
ret = of_get_mac_address(np, tmpaddr); if (!ret)
iap = tmpaddr; elseif (ret == -EPROBE_DEFER) return ret;
}
}
/* * 3) from flash or fuse (via platform data)
*/ if (!is_valid_ether_addr(iap)) { #ifdef CONFIG_M5272 if (FEC_FLASHMAC)
iap = (unsignedchar *)FEC_FLASHMAC; #else struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
if (pdata)
iap = (unsignedchar *)&pdata->mac; #endif
}
/* * 4) FEC mac registers set by bootloader
*/ if (!is_valid_ether_addr(iap)) {
*((__be32 *) &tmpaddr[0]) =
cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
*((__be16 *) &tmpaddr[4]) =
cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
/* * 5) random mac address
*/ if (!is_valid_ether_addr(iap)) { /* Report it and use a random ethernet address instead */
dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
eth_hw_addr_random(ndev);
dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
ndev->dev_addr); return 0;
}
/* Adjust MAC if using macaddr */
eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
/* LPI Sleep Ts count base on tx clk (clk_ref). * The lpi sleep cnt value = X us / (cycle_ns).
*/ staticint fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
{ struct fec_enet_private *fep = netdev_priv(ndev);
/* * If the netdev is down, or is going down, we're not interested * in link state events, so just mark our idea of the link as down * and ignore the event.
*/ if (!netif_running(ndev) || !netif_device_present(ndev)) {
fep->link = 0;
} elseif (phy_dev->link) { if (!fep->link) {
fep->link = phy_dev->link;
status_change = 1;
}
if (phy_dev) {
phy_reset_after_clk_enable(phy_dev);
} elseif (fep->phy_node) { /* * If the PHY still is not bound to the MAC, but there is * OF PHY node and a matching PHY device instance already, * use the OF PHY node to obtain the PHY device instance, * and then use that PHY device instance when triggering * the PHY reset.
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev); if (phy_dev)
put_device(&phy_dev->mdio.dev);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.