/* Use some intentionally tricky logic here to initialize the whole struct to
 * 0xffff, but then override certain fields, requiring us to indicate that we
 * "know" that there are overrides in this structure, and we'll need to disable
 * that warning from W=1 builds. GCC has supported this option since 4.2.X, but
 * the macros needed to do this are only defined for GCC 8 and newer.
*/
__diag_push();
__diag_ignore_all("-Woverride-init", "logic to initialize all and then override some is OK"); staticconst u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
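/* A minimal sketch (not driver code) of the initialize-all-then-override
 * idiom guarded above: SH_ETH_OFFSET_DEFAULTS expands to a GCC range
 * designator that marks every slot invalid, and later designated
 * initializers then override individual entries, which is exactly what
 * -Woverride-init warns about:
 *
 *	static const u16 example[8] = {
 *		[0 ... 7] = 0xffff,	(everything invalid by default)
 *		[2] = 0x0123,		(then override a selected entry)
 *	};
 *
 * example[2] ends up as 0x0123 while every other entry stays 0xffff.
 */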
	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
		value = 0x3;
		break;
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev, "PHY interface mode was not set up. Set to MII.\n");
		value = 0x1;
		break;
	}
	sh_eth_write(ndev, value, RMII_MII);
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
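/* For example (illustrative, assuming SH_ETH_RX_ALIGN == 32): with skb->data
 * ending in ...0x04, reserve is 4 and skb_reserve() advances the data
 * pointer by 28 bytes to the next 32-byte boundary.
 */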
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
sh_eth_write(ndev,
(ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
(ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
sh_eth_write(ndev,
(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
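/* For example (illustrative values only): dev_addr 02:11:22:33:44:55 is
 * programmed as MAHR = 0x02112233 and MALR = 0x00004455; only the low
 * 16 bits of MALR carry address bytes.
 */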
/* Get MAC address from the SuperH MAC address registers.
 *
 * The SuperH Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). To use this device, the MAC address must be
 * set in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
eth_hw_addr_set(ndev, mac);
} else {
u32 mahr = sh_eth_read(ndev, MAHR);
u32 malr = sh_eth_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}
	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
sh_eth_set_receive_align(skb);
/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
}
mdp->rx_skbuff[i] = skb;
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (mdp->cd->xdfar_rw)
sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
}
}
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
rxdesc->status |= cpu_to_le32(RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);
	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (mdp->cd->xdfar_rw)
sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
}
}
txdesc->status |= cpu_to_le32(TD_TDLE);
}
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;
	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
*/
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;
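	/* Worked example (illustrative): for the default MTU of 1500,
	 * 1500 + 26 = 1526, rounding up to a multiple of 8 gives 1528,
	 * and 1528 + 2 + 16 = 1546 bytes per Rx buffer; an MTU of 1492
	 * or less uses the fixed PKT_BUF_SZ instead.
	 */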
/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;
	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);
/* Disable TX FIFO egress to MAC */
sh_eth_rcv_snd_disable(ndev);
/* Stop RX DMA at next packet boundary */
sh_eth_write(ndev, 0, EDRRR);
	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
*/
msleep(2); /* max frame time at 10 Mbps < 1250 us */
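	/* Checking that bound (illustrative): a maximal 1518-byte frame plus
	 * 8 bytes of preamble and a 12-byte interframe gap occupies 1538
	 * byte times = 12304 bit times, i.e. ~1230 us at 10 Mbps, so a 2 ms
	 * sleep is comfortably enough.
	 */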
sh_eth_get_stats(ndev);
mdp->cd->soft_reset(ndev);
	/* Set the RMII mode again if required */
	if (mdp->cd->rmiimode)
sh_eth_write(ndev, 0x1, RMIIMODE);
/* Set MAC address again */
update_mac_address(ndev);
}
	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
u16 pkt_len;
u32 buf_len;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
dma_rmb();
desc_status = le32_to_cpu(rxdesc->status);
pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;
		if (--boguscnt < 0)
			break;
netif_info(mdp, rx_status, ndev, "rx entry %d status 0x%08x len %d\n",
entry, desc_status, pkt_len);
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and the R7S72100,
		 * the RFS bits are from bit 25 to bit 16, so the driver
		 * needs to shift them right by 16.
		 */
		if (mdp->cd->csmr)
desc_status >>= 16;
		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else if (skb) {
			dma_addr = le32_to_cpu(rxdesc->addr);
			if (!mdp->cd->hw_swap)
sh_eth_soft_swap(
phys_to_virt(ALIGN(dma_addr, 4)),
pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
dma_unmap_single(&mdp->pdev->dev, dma_addr,
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
sh_eth_rx_csum(skb);
netif_receive_skb(skb);
ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
ndev->stats.multicast++;
}
entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
rxdesc->len = cpu_to_le32(buf_len << 16);
if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
sh_eth_set_receive_align(skb);
dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
						    buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
}
mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
rxdesc->addr = cpu_to_le32(dma_addr);
}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
}
	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;
/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_emac_interrupt() in order
	 * to quench it as it doesn't get cleared by just writing 1 to the ECI
	 * bit...
*/
intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | EESIPR_ECIIP;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;
if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
}
	/* Enable TX and RX right over here, if E-MAC change is ignored */
	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
sh_eth_rcv_snd_enable(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
if (new_state && netif_msg_link(mdp))
phy_print_status(phydev);
}
if (IS_ERR(phydev)) {
netdev_err(ndev, "failed to connect PHY\n"); return PTR_ERR(phydev);
}
	/* mask with MAC supported features */
	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
phy_set_max_speed(phydev, SPEED_100);
phy_attached_info(phydev);
return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

phy_start(ndev->phydev);
return 0;
}
/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
 * version must be bumped as well. Just adding registers up to that
 * limit is fine, as long as the existing register indices don't
 * change.
 */
#define SH_ETH_REG_DUMP_VERSION		1
#define SH_ETH_REG_DUMP_MAX_REGS	256
	/* Dump starts with a bitmap that tells ethtool which
	 * registers are defined for this chip.
*/
	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
	if (buf) {
valid_map = buf;
buf += len;
} else {
valid_map = NULL;
}
	/* Add a register to the dump, if it has a defined offset.
	 * This automatically skips most undefined registers, but for
	 * some it is also necessary to check a capability flag in
	 * struct sh_eth_cpu_data.
	 */
#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
#define add_reg_from(reg, read_expr) do {				\
		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
			if (buf) {					\
				mark_reg_valid(reg);			\
				*buf++ = read_expr;			\
			}						\
			++len;						\
		}							\
	} while (0)
#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
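	/* A minimal sketch (not driver code) of how a consumer of this dump
	 * format could test whether a register is present, mirroring
	 * mark_reg_valid() above:
	 *
	 *	bool present = valid_map[reg / 32] & (1U << (reg % 32));
	 */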
add_reg(EDSR);
add_reg(EDMR);
add_reg(EDTRR);
add_reg(EDRRR);
add_reg(EESR);
add_reg(EESIPR);
	add_reg(TDLAR);
	if (!cd->no_xdfar)
add_reg(TDFAR);
add_reg(TDFXR);
add_reg(TDFFR);
	add_reg(RDLAR);
	if (!cd->no_xdfar)
add_reg(RDFAR);
add_reg(RDFXR);
add_reg(RDFFR);
add_reg(TRSCER);
add_reg(RMFCR);
add_reg(TFTR);
add_reg(FDR);
add_reg(RMCR);
add_reg(TFUCR);
	add_reg(RFOCR);
	if (cd->rmiimode)
add_reg(RMIIMODE);
	add_reg(FCFTR);
	if (cd->rpadir)
		add_reg(RPADIR);
	if (!cd->no_trimd)
add_reg(TRIMD);
add_reg(ECMR);
add_reg(ECSR);
add_reg(ECSIPR);
	add_reg(PIR);
	if (!cd->no_psr)
add_reg(PSR);
add_reg(RDMLR);
add_reg(RFLR);
	add_reg(IPGR);
	if (cd->apr)
		add_reg(APR);
	if (cd->mpr)
add_reg(MPR);
add_reg(RFCR);
	add_reg(RFCF);
	if (cd->tpauser)
add_reg(TPAUSER);
	add_reg(TPAUSECR);
	if (cd->gecmr)
		add_reg(GECMR);
	if (cd->bculr)
add_reg(BCULR);
add_reg(MAHR);
	add_reg(MALR);
	if (!cd->no_tx_cntrs) {
add_reg(TROCR);
add_reg(CDCR);
add_reg(LCCR);
add_reg(CNDCR);
}
add_reg(CEFCR);
add_reg(FRECR);
add_reg(TSFRCR);
	add_reg(TLFRCR);
	if (cd->cexcr) {
add_reg(CERCR);
add_reg(CEECR);
}
	add_reg(MAFCR);
	if (cd->rtrate)
		add_reg(RTRATE);
	if (cd->csmr)
		add_reg(CSMR);
	if (cd->select_mii)
		add_reg(RMII_MII);
	if (cd->tsu) {
add_tsu_reg(ARSTR);
		add_tsu_reg(TSU_CTRST);
		if (cd->dual_port) {
add_tsu_reg(TSU_FWEN0);
add_tsu_reg(TSU_FWEN1);
add_tsu_reg(TSU_FCM);
add_tsu_reg(TSU_BSYSL0);
add_tsu_reg(TSU_BSYSL1);
add_tsu_reg(TSU_PRISL0);
add_tsu_reg(TSU_PRISL1);
add_tsu_reg(TSU_FWSL0);
add_tsu_reg(TSU_FWSL1);
}
		add_tsu_reg(TSU_FWSLC);
		if (cd->dual_port) {
add_tsu_reg(TSU_QTAGM0);
add_tsu_reg(TSU_QTAGM1);
add_tsu_reg(TSU_FWSR);
add_tsu_reg(TSU_FWINMK);
add_tsu_reg(TSU_ADQT0);
add_tsu_reg(TSU_ADQT1);
add_tsu_reg(TSU_VTAG0);
add_tsu_reg(TSU_VTAG1);
}
add_tsu_reg(TSU_ADSBSY);
add_tsu_reg(TSU_TEN);
add_tsu_reg(TSU_POST1);
add_tsu_reg(TSU_POST2);
add_tsu_reg(TSU_POST3);
		add_tsu_reg(TSU_POST4);

		/* This is the start of a table, not just a single register. */
		if (buf) {
			unsigned int i;

			mark_reg_valid(TSU_ADRH0);
			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
*buf++ = ioread32(mdp->tsu_addr +
mdp->reg_offset[TSU_ADRH0] +
i * 4);
}
len += SH_ETH_TSU_CAM_ENTRIES * 2;
}
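/* Resulting layout (derived from the code above): a 256-bit validity bitmap
 * (eight 32-bit words) comes first, followed by one 32-bit value per valid
 * register in increasing index order, with the TSU_ADRH0 CAM table appended
 * as SH_ETH_TSU_CAM_ENTRIES * 2 consecutive words.
 */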
if (netif_running(ndev)) {
netif_device_detach(ndev);
netif_tx_disable(ndev);
		/* Serialise with the interrupt handler and NAPI, then
		 * disable interrupts. We have to clear the
		 * irq_enabled flag first to ensure that interrupts
		 * won't be re-enabled.
*/
mdp->irq_enabled = false;
synchronize_irq(ndev->irq);
napi_synchronize(&mdp->napi);
sh_eth_write(ndev, 0x0000, EESIPR);
sh_eth_dev_exit(ndev);
/* Free all the skbuffs in the Rx queue and the DMA buffers. */
sh_eth_ring_free(ndev);
}
/* Set new parameters */
mdp->num_rx_ring = ring->rx_pending;
mdp->num_tx_ring = ring->tx_pending;
if (netif_running(ndev)) {
		ret = sh_eth_ring_init(ndev);
		if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
				   __func__);
			return ret;
}
		ret = sh_eth_dev_init(ndev);
		if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
}
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
rxdesc->status = cpu_to_le32(0);
rxdesc->addr = cpu_to_le32(0xBADF00D0);
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
dev_kfree_skb(mdp->tx_skbuff[i]);
mdp->tx_skbuff[i] = NULL;
}
	dma_wmb(); /* TACT bit must be set after all the above writes */
	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_le32(TD_TACT);
wmb(); /* cur_tx must be incremented after TACT bit was set */
mdp->cur_tx++;
if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
return NETDEV_TX_OK;
}
/* The statistics registers have write-clear behaviour, which means we
 * will lose any increment between the read and write. We mitigate
 * this by only clearing when we read a non-zero value, so we will
 * never falsely report a total of zero.
 */
static void
sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
{
	u32 delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);
	}
}
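/* Typical use (illustrative; the register shown assumes the part has the
 * Tx error counters):
 *
 *	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
 */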
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

netif_stop_queue(ndev);
	/* Serialise with the interrupt handler and NAPI, then disable
	 * interrupts. We have to clear the irq_enabled flag first to
	 * ensure that interrupts won't be re-enabled.
*/
mdp->irq_enabled = false;
synchronize_irq(ndev->irq);
napi_disable(&mdp->napi);
sh_eth_write(ndev, 0x0000, EESIPR);
sh_eth_dev_exit(ndev);
	/* PHY Disconnect */
	if (ndev->phydev) {
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
}
free_irq(ndev->irq, ndev);
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
mdp->is_opened = 0;
pm_runtime_put(&mdp->pdev->dev);
return 0;
}
static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	ndev->mtu = new_mtu;
	netdev_update_features(ndev);

	return 0;
}