// SPDX-License-Identifier: GPL-2.0-or-later /* * drivers/net/ethernet/ibm/emac/core.c * * Driver for PowerPC 4xx on-chip ethernet controller. * * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Based on the arch/ppc version of the driver: * * Copyright (c) 2004, 2005 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * * Based on original work by * Matt Porter <mporter@kernel.crashing.org> * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org> * Armin Kuster <akuster@mvista.com> * Johnnie Peters <jpeters@mvista.com>
*/
/* * Lack of dma_unmap_???? calls is intentional. * * API-correct usage requires additional support state information to be * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to * EMAC design (e.g. TX buffer passed from network stack can be split into * several BDs, dma_map_single/dma_map_page can be used to map particular BD), * maintaining such information will add additional overhead. * Current DMA API implementation for 4xx processors only ensures cache coherency * and dma_unmap_???? routines are empty and are likely to stay this way. * I decided to omit dma_unmap_??? calls because I don't want to add additional * complexity just for the sake of following some abstract API, when it doesn't * add any real benefit to the driver. I understand that this decision maybe * controversial, but I really tried to make code API-correct and efficient * at the same time and didn't come up with code I liked :(. --ebs
*/
MODULE_DESCRIPTION(DRV_DESC);
/* NOTE(review): the author e-mail addresses were stripped by angle-bracket
 * removal during extraction ("Eugene Surovegin  or "); restored from the
 * copyright header at the top of this file.
 */
MODULE_AUTHOR("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/* minimum number of free TX descriptors required to wake up TX process */ #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
/* If packet size is less than this number, we allocate small skb and copy packet * contents into it instead of just sending original big skb up
 */ #define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need * to avoid re-using the same PHY ID in cases where the arch didn't * setup precise phy_map entries * * XXX This is something that needs to be reworked as we can have multiple * EMAC "sets" (multiple ASICs containing several EMACs) though we can * probably require in that case to have explicit PHY IDs in the device-tree
 */ static u32 busy_phy_map; static DEFINE_MUTEX(emac_phy_map_lock);
/* busy_phy_map: presumably a bitmap of PHY IDs already claimed by an EMAC
 * (see the comment above about avoiding PHY ID re-use); emac_phy_map_lock
 * serializes updates to it.  NOTE(review): usage sites are outside this
 * chunk — confirm against the full file.
 */
/* Having stable interface names is a doomed idea. However, it would be nice * if we didn't have completely random interface names at boot too :-) It's * just a matter of making everybody's life easier. Since we are doing * threaded probing, it's a bit harder though. The base idea here is that * we make up a list of all emacs in the device-tree before we register the * driver. Every emac will then wait for the previous one in the list to * initialize before itself. We should also keep that list ordered by * cell_index. * That list is only 4 entries long, meaning that additional EMACs don't * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
/* NOTE(review): headerless fragment — the enclosing function signature was
 * lost in extraction.  This is the tail of the TX-disable helper: if
 * MR0[TXE] is set, clear it and busy-wait (1us per iteration, up to
 * dev->stop_timeout iterations) for MR0[TXI] (TX idle) to assert,
 * reporting a timeout error if it never does.
 */
r = in_be32(&p->mr0); if (r & EMAC_MR0_TXE) { int n = dev->stop_timeout;
out_be32(&p->mr0, r & ~EMAC_MR0_TXE); while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
udelay(1);
--n;
} if (unlikely(!n))
emac_report_timeout_error(dev, "TX disable timeout");
}
}
/* NOTE(review): headerless fragment — body of the RX-enable helper.  Skips
 * enabling when the MAL commac is flagged RX-stopped; otherwise waits out a
 * still-pending asynchronous RX disable (MR0[RXI] not yet set) before
 * setting MR0[RXE].
 */
if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) goto out;
DBG(dev, "rx_enable" NL);
/* NOTE(review): in the while condition below, "r = in_be32(&p->mr0) &
 * EMAC_MR0_RXI" assigns the MASKED value to r due to precedence (& binds
 * tighter than =); this matches the upstream driver's behavior — the
 * subsequent write ORs EMAC_MR0_RXE into that value.
 */
r = in_be32(&p->mr0); if (!(r & EMAC_MR0_RXE)) { if (unlikely(!(r & EMAC_MR0_RXI))) { /* Wait if previous async disable is still in progress */ int n = dev->stop_timeout; while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
udelay(1);
--n;
} if (unlikely(!n))
emac_report_timeout_error(dev, "RX disable timeout");
}
out_be32(&p->mr0, r | EMAC_MR0_RXE);
}
out:
;
}
/* NOTE(review): headerless fragment — tail of the synchronous RX-disable
 * helper: clear MR0[RXE] and busy-wait (1us per iteration, up to
 * dev->stop_timeout iterations) for MR0[RXI] to assert, reporting a
 * timeout error otherwise.
 */
r = in_be32(&p->mr0); if (r & EMAC_MR0_RXE) { int n = dev->stop_timeout;
out_be32(&p->mr0, r & ~EMAC_MR0_RXE); while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
udelay(1);
--n;
} if (unlikely(!n))
emac_report_timeout_error(dev, "RX disable timeout");
}
}
/* NOTE: unconditional netif_wake_queue is only appropriate * so long as all callers are assured to have free tx slots * (taken from tg3... though the case where that is wrong is * not terribly harmful)
 */
/* NOTE(review): headerless fragment — the netif_wake_queue() call the
 * comment above refers to was lost in extraction; only the MAL poll
 * re-enable survives here.
 */
mal_poll_enable(dev->mal, &dev->commac);
}
/* NOTE(review): headerless fragment — asynchronous RX disable: clears
 * MR0[RXE] without waiting for MR0[RXI]; the RX-enable path above waits
 * out this pending disable.
 */
r = in_be32(&p->mr0); if (r & EMAC_MR0_RXE)
out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
staticint emac_reset(struct emac_instance *dev)
{ struct emac_regs __iomem *p = dev->emacp; int n = 20; bool __maybe_unused try_internal_clock = false;
DBG(dev, "reset" NL);
if (!dev->reset_failed) { /* 40x erratum suggests stopping RX channel before reset, * we stop TX as well
*/
emac_rx_disable(dev);
emac_tx_disable(dev);
}
#ifdef CONFIG_PPC_DCR_NATIVE
do_retry: /* * PPC460EX/GT Embedded Processor Advanced User's Manual * section 28.10.1 Mode Register 0 (EMACx_MR0) states: * Note: The PHY must provide a TX Clk in order to perform a soft reset * of the EMAC. If none is present, select the internal clock * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). * After a soft reset, select the external clock. * * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the * ethernet cable is not attached. This causes the reset to timeout * and the PHY detection code in emac_init_phy() is unable to * communicate and detect the AR8035-A PHY. As a result, the emac * driver bails out early and the user has no ethernet. * In order to stay compatible with existing configurations, the * driver will temporarily switch to the internal clock, after * the first reset fails.
*/ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { if (try_internal_clock || (dev->phy_address == 0xffffffff &&
dev->phy_map == 0xffffffff)) { /* No PHY: select internal loop clock before reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
0, SDR0_ETH_CFG_ECS << dev->cell_index);
} else { /* PHY present: select external clock before reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
SDR0_ETH_CFG_ECS << dev->cell_index, 0);
}
} #endif
out_be32(&p->mr0, EMAC_MR0_SRST); while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
--n;
#ifdef CONFIG_PPC_DCR_NATIVE if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { if (!n && !try_internal_clock) { /* first attempt has timed out. */
n = 20;
try_internal_clock = true; goto do_retry;
}
if (try_internal_clock || (dev->phy_address == 0xffffffff &&
dev->phy_map == 0xffffffff)) { /* No PHY: restore external clock source after reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
SDR0_ETH_CFG_ECS << dev->cell_index, 0);
}
} #endif
r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
if (emac_has_feature(dev, EMAC_FTR_EMAC4))
r |= EMAC4_RMR_BASE; else
r |= EMAC_RMR_BASE;
if (ndev->flags & IFF_PROMISC)
r |= EMAC_RMR_PME; elseif (ndev->flags & IFF_ALLMULTI ||
(netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
r |= EMAC_RMR_PMME; elseif (!netdev_mc_empty(ndev))
r |= EMAC_RMR_MAE;
if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
r &= ~EMAC4_RMR_MJS_MASK;
r |= EMAC4_RMR_MJS(ndev->mtu);
}
return r;
}
/* Compute the base MR1 value for the original (EMAC1) core from the
 * configured TX/RX FIFO sizes.  An unrecognized size leaves the
 * corresponding MR1 field untouched and logs a warning.
 */
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch (tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
/* Compute the base MR1 value for the EMAC4 core from the configured TX/RX
 * FIFO sizes, including the OPB bus-clock field.  An unrecognized size
 * leaves the corresponding MR1 field untouched and logs a warning.
 * NOTE(review): the final "return ret;" and closing brace were lost in this
 * chunk's extraction (control fell off the end of a non-void function);
 * restored to match the parallel __emac_calc_base_mr1() above.
 */
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch (tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_TFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch (rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 8192:
		ret |= EMAC4_MR1_RFS_8K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
/* NOTE(review): headerless fragment — middle of the configure/MR1-programming
 * path (function header, locals and several elided pieces are missing from
 * this chunk; the SPEED_1000 switch opened below is never closed here).
 * The fused "elseif" tokens on two lines below are extraction damage and
 * will not compile; they cannot be repaired without the missing context.
 */
/* No link, force loopback */ if (!link)
mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
/* Check for full duplex */ elseif (dev->phy.duplex == DUPLEX_FULL)
mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
/* Adjust fifo sizes, mr1 and timeouts based on link speed */
dev->stop_timeout = STOP_TIMEOUT_10; switch (dev->phy.speed) { case SPEED_1000: if (emac_phy_gpcs(dev->phy.mode)) {
mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
(dev->phy.gpcs_address != 0xffffffff) ?
dev->phy.gpcs_address : dev->phy.address);
/* Put some arbitrary OUI, Manuf & Rev IDs so we can * identify this GPCS PHY later.
 */
out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
} else
mr1 |= EMAC_MR1_MF_1000;
/* Propagate the negotiated speed to the RGMII/ZMII bridge, if present */
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
dev->phy.speed); if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
/* on 40x erratum forces us to NOT use integrated flow control, * let's hope it works on 44x ;)
 */ if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
dev->phy.duplex == DUPLEX_FULL) { if (dev->phy.pause)
mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP; elseif (dev->phy.asym_pause)
mr1 |= EMAC_MR1_APP;
}
/* Add base settings & fifo sizes & program MR1 */
mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
out_be32(&p->mr1, mr1);
/* PAUSE frame is sent when RX FIFO reaches its high-water mark, there should be still enough space in FIFO to allow the our link partner time to process this frame and also time to send PAUSE frame itself.
Here is the worst case scenario for the RX FIFO "headroom" (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
1) One maximum-length frame on TX 1522 bytes 2) One PAUSE frame time 64 bytes 3) PAUSE frame decode time allowance 64 bytes 4) One maximum-length frame on RX 1522 bytes 5) Round-trip propagation delay of the link (100Mb) 15 bytes ---------- 3187 bytes
I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes) low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
 */
r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
rx_size / 4 / dev->fifo_entry_size);
out_be32(&p->rwmr, r);
/* Set PAUSE timer to the maximum */
out_be32(&p->ptr, 0xffff);
/* We need to take GPCS PHY out of isolate mode after EMAC reset */ if (emac_phy_gpcs(dev->phy.mode)) { if (dev->phy.gpcs_address != 0xffffffff)
emac_mii_reset_gpcs(&dev->phy); else
emac_mii_reset_phy(&dev->phy);
}
/* NOTE(review): headerless fragment — tail of a predicate (presumably the
 * STA/PHY "done" helper; confirm against the full file): inverts the
 * result on cores with the inverted STACR OC-bit quirk.  The stray ";"
 * after the closing brace is extraction residue.
 */
if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
done = !done;
return done;
};
/* Perform one MDIO read via the STA control register (STACR), under the
 * per-device MDIO mutex and with the ZMII/RGMII MDIO port routed to this
 * EMAC.  NOTE(review): truncated in this chunk — the PHY-error check,
 * result extraction and the "bail" cleanup path are missing; fixed
 * "staticint" keyword fusion from the extraction.
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
/* NOTE(review): headerless fragment — body and tail of the MDIO write path
 * (the function signature, including the "val" parameter used below, was
 * lost in extraction).  Routes the MDIO port, waits for the STA interface
 * to go idle, issues the write command, waits for completion, then releases
 * the MDIO port and the mutex via the shared "bail" label.
 */
/* Enable proper MDIO port */ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_get_mdio(dev->zmii_dev, dev->zmii_port); if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
/* Wait for management interface to be idle */
n = 20; while (!emac_phy_done(dev, in_be32(&p->stacr))) {
udelay(1); if (!--n) {
DBG2(dev, " -> timeout wait idle\n"); goto bail;
}
}
/* Issue write command */ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
r = EMAC4_STACR_BASE(dev->opb_bus_freq); else
r = EMAC_STACR_BASE(dev->opb_bus_freq); if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
r |= EMAC_STACR_OC; if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
r |= EMACX_STACR_STAC_WRITE; else
r |= EMAC_STACR_STAC_WRITE;
r |= (reg & EMAC_STACR_PRA_MASK) |
((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
(val << EMAC_STACR_PHYD_SHIFT);
out_be32(&p->stacr, r);
/* Wait for write to complete */
n = 200; while (!emac_phy_done(dev, in_be32(&p->stacr))) {
udelay(1); if (!--n) {
DBG2(dev, " -> timeout wait complete\n"); goto bail;
}
}
bail: if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
mutex_unlock(&dev->mdio_lock);
}
/* NOTE(review): truncated fragment — only the prologue of emac_mdio_read()
 * survives in this chunk; fixed "staticint" keyword fusion from the
 * extraction.
 */
static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res;
/* NOTE(review): headerless fragment — tail of the multicast-list update
 * path (the "rmr" and "p" locals used below are declared in the missing
 * prologue): stop RX, reprogram the multicast hash if MAE is requested,
 * write the new RMR value, then re-enable RX.
 */
/* I decided to relax register access rules here to avoid * full EMAC reset. * * There is a real problem with EMAC4 core if we use MWSW_001 bit * in MR1 register and do a full EMAC reset. * One TX BD status update is delayed and, after EMAC reset, it * never happens, resulting in TX hung (it'll be recovered by TX * timeout handler eventually, but this is just gross). * So we either have to do full TX reset or try to cheat here :) * * The only required change is to RX mode register, so I *think* all * we need is just to stop RX channel. This seems to work on all * tested SoCs. --ebs *
 */
dev->mcast_pending = 0;
emac_rx_disable(dev); if (rmr & EMAC_RMR_MAE)
emac_hash_mc(dev);
out_be32(&p->rmr, rmr);
emac_rx_enable(dev);
}
/* Rebuild the RX ring for a new MTU.  NOTE(review): truncated in this
 * chunk — only the scatter-gather skb drop and the start of the first
 * ring pass survive; fixed "staticint" keyword fusion from the extraction.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	/* Drop any partially-assembled scatter-gather packet */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;
/* Process ctx, rtnl_lock semaphore */
/* NOTE(review): truncated in this chunk — the MTU commit and return are
 * missing; fixed "staticint" keyword fusion from the extraction.
 */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int ret = 0;

	DBG(dev, "change_mtu(%d)" NL, new_mtu);

	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}
/* Log the current link state: speed, duplex and pause mode when the
 * carrier is up, "link is down" otherwise.  Fixed "staticvoid" keyword
 * fusion from the extraction.
 */
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
/* Process ctx, rtnl_lock semaphore */
/* NOTE(review): truncated in this chunk — only the RX-ring allocation
 * prologue of emac_open() survives (the "oom" label and the rest of the
 * bring-up are missing); fixed "staticint" keyword fusion.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int i;

	DBG(dev, "open" NL);

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}
/* NOTE(review): two unrelated headerless fragments fused by the extraction:
 * (1) a piece of the link-poll path — on carrier-up transition, restore the
 *     default RX clock and re-read link parameters from the PHY;
 * (2) a piece of the transmit path — kick TMR0 with the EMAC4 or classic
 *     transmit command.
 */
if (dev->phy.def->ops->poll_link(&dev->phy)) { if (!netif_carrier_ok(dev->ndev)) {
emac_rx_clk_default(dev); /* Get new link parameters */
dev->phy.def->ops->read_link(&dev->phy);
/* Send the packet out. If the if makes a significant perf * difference, then we can store the TMR0 value in "dev" * instead
 */ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
out_be32(&p->tmr0, EMAC4_TMR0_XMIT); else
out_be32(&p->tmr0, EMAC_TMR0_XMIT);
/* Split an oversized TX buffer into MAL_MAX_TX_SIZE-sized descriptor
 * chunks.  NOTE(review): truncated in this chunk — the descriptor fill and
 * loop exit are missing; fixed "staticinlineint" keyword fusion from the
 * extraction.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		/* Mark the final chunk of the final buffer, and the ring
		 * wrap slot, in the descriptor control bits.
		 */
		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;
/* NOTE(review): truncated fragment — prologue of the scatter-gather
 * transmit path; the descriptor-filling body is missing from this chunk.
 * Small non-fragmented packets short-circuit to emac_start_xmit().
 */
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */ static netdev_tx_t
emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{ struct emac_instance *dev = netdev_priv(ndev); int nr_frags = skb_shinfo(skb)->nr_frags; int len = skb->len, chunk; int slot, i;
u16 ctrl;
u32 pd;
/* This is common "fast" path */ if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE)) return emac_start_xmit(skb, ndev);
len -= skb->data_len;
/* Note, this is only an *estimation*, we can still run out of empty * slots because of the additional fragmentation into * MAL_MAX_TX_SIZE-sized chunks
 */ if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF)) goto stop_queue;
/* NOTE(review): truncated fragment — only the prologue of the
 * scatter-gather RX append helper survives in this chunk; fixed
 * "staticinlineint" keyword fusion from the extraction.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;
/* NOTE(review): headerless fragment — part of the EMAC interrupt handler
 * that translates ISR error bits into the driver's error-statistics
 * counters ("st" and "isr" are declared in the missing prologue).
 */
if (isr & EMAC4_ISR_TXPE)
++st->tx_parity; if (isr & EMAC4_ISR_RXPE)
++st->rx_parity; if (isr & EMAC4_ISR_TXUE)
++st->tx_underrun; if (isr & EMAC4_ISR_RXOE)
++st->rx_fifo_overrun; if (isr & EMAC_ISR_OVR)
++st->rx_overrun; if (isr & EMAC_ISR_BP)
++st->rx_bad_packet; if (isr & EMAC_ISR_RP)
++st->rx_runt_packet; if (isr & EMAC_ISR_SE)
++st->rx_short_event; if (isr & EMAC_ISR_ALE)
++st->rx_alignment_error; if (isr & EMAC_ISR_BFCS)
++st->rx_bad_fcs; if (isr & EMAC_ISR_PTLE)
++st->rx_packet_too_long; if (isr & EMAC_ISR_ORE)
++st->rx_out_of_range; if (isr & EMAC_ISR_IRE)
++st->rx_in_range; if (isr & EMAC_ISR_SQE)
++st->tx_sqe; if (isr & EMAC_ISR_TE)
++st->tx_errors;
/* Walk the dependency table for this EMAC: an entry counts as satisfied
 * when its phandle is 0 (no dependency) or its platform device has probed
 * (drvdata set).  Returns 0 when all EMAC_DEP_COUNT entries are satisfied,
 * -EPROBE_DEFER otherwise.  Fixed "staticint" keyword fusion from the
 * extraction.
 */
static int emac_check_deps(struct emac_instance *dev,
			   struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	if (there != EMAC_DEP_COUNT)
		return -EPROBE_DEFER;
	return 0;
}
/* mii_bus read hook: forward to the EMAC MDIO read path, mapping errors
 * to 0xffff (all-ones, "no device") for the MDIO layer.  Fixed
 * "staticint" keyword fusion from the extraction.
 */
static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
{
	int ret = emac_mdio_read(bus->priv, addr, regnum);
	/* This is a workaround for powered down ports/phys.
	 * In the wild, this was seen on the Cisco Meraki MX60(W).
	 * This hardware disables ports as part of the handoff
	 * procedure. Accessing the ports will lead to errors
	 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
	 */
	return ret < 0 ? 0xffff : ret;
}
/* mii_bus write hook: forward to the EMAC MDIO write path; always reports
 * success to the MDIO layer.  Fixed "staticint" keyword fusion from the
 * extraction.
 */
static int emac_mii_bus_write(struct mii_bus *bus, int addr,
			      int regnum, u16 val)
{
	emac_mdio_write(bus->priv, addr, regnum, val);
	return 0;
}
/*
 * NOTE(review): the trailer below is not C source — it is German web-page
 * boilerplate appended during extraction; preserved here, commented out,
 * so the file remains parseable:
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 *  noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 *  zugesichert. Bemerkung: Die farbliche Syntaxdarstellung und die
 *  Messung sind noch experimentell."
 *
 * Translation: "The information on this web page was carefully compiled to
 * the best of our knowledge. However, neither completeness nor correctness
 * nor quality of the provided information is guaranteed. Remark: the
 * colored syntax display and the measurement are still experimental."
 */