// SPDX-License-Identifier: GPL-2.0-only
/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 */
/* RX/TX ring sizes are module parameters (perm 0644: world-readable,
 * owner-writable in sysfs) so they can be tuned at module load time.
 */
#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");

/* Sentinel value -- presumably "poll the PHY, no interrupt line"; confirm
 * against the PHY-setup code in the full driver.
 */
#define POLL_PHY (-1)

/* Make sure DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. If the max frame size is
 * 1518, a VLAN header would be additional 4 bytes and additional
 * headroom for alignment is 2 bytes, 2048 is just fine.
 */
#define ALTERA_RXDMABUFFER_SIZE	2048

/* Allow network stack to resume queuing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)
/* Allocate the RX/TX descriptor rings and attach receive buffers.
 *
 * Returns 0 on success or -ENOMEM / tse_init_rx_buffer()'s error code on
 * failure.  All partially-acquired resources are released through the
 * goto cleanup chain; the garbled original referenced these labels
 * without defining them and had a buffer-freeing helper's body fused in.
 */
static int alloc_init_skbufs(struct altera_tse_private *priv)
{
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int ret = -ENOMEM;
	int i;

	/* Create Rx ring buffer */
	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto err_rx_ring;

	/* Create Tx ring buffer */
	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_tx_ring;

	priv->tx_cons = 0;
	priv->tx_prod = 0;

	/* Init Rx ring: give every descriptor a DMA-mapped receive buffer */
	for (i = 0; i < rx_descs; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_init_rx_buffers;
	}

	priv->rx_cons = 0;
	priv->rx_prod = 0;

	return 0;

err_init_rx_buffers:
	/* Unwind only the RX buffers that were successfully initialised */
	while (--i >= 0)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_tx_ring:
	kfree(priv->rx_ring);
err_rx_ring:
	return ret;
}
/* Reallocate the skb for the reception process
 *
 * Walk the slots the consumer index has moved past (rx_cons ahead of
 * rx_prod), attach a fresh receive buffer to each empty slot, and hand
 * the descriptor back to the DMA engine.  Stops early if buffer
 * allocation fails; the remaining slots are retried on the next call.
 * (The original had fused tokens "staticinlinevoid"/"unsignedint",
 * which do not compile.)
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
	unsigned int rxsize = priv->rx_ring_size;
	unsigned int entry;
	int ret;

	for (; priv->rx_cons - priv->rx_prod > 0;
	     priv->rx_prod++) {
		entry = priv->rx_prod % rxsize;
		if (likely(priv->rx_ring[entry].skb == NULL)) {
			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
						 priv->rx_dma_buf_sz);
			if (unlikely(ret != 0))
				break;
			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
		}
	}
}
/* Pull out the VLAN tag and fix up the packet
 *
 * NOTE(review): truncated in this chunk -- only the local declarations
 * survive; the function body and closing brace are missing.  The fused
 * token "staticinlinevoid" is a compile error.  Restore from the
 * complete driver before building.
 */
staticinlinevoid tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{ struct ethhdr *eth_hdr;
u16 vid;
/* Receive a packet: retrieve and pass over to upper levels
 *
 * NOTE(review): garbled/truncated fragment.  The receive loop is cut
 * short after the "pktlength -= 2" adjustment, and the remaining lines
 * (NAPI scheduling, "return IRQ_HANDLED") clearly belong to the
 * interrupt handler, not to tse_rx.  Fused tokens ("staticint",
 * "unsignedint") are compile errors.  Restore both functions from the
 * complete driver before building.
 */
staticint tse_rx(struct altera_tse_private *priv, int limit)
{ unsignedint entry = priv->rx_cons % priv->rx_ring_size; unsignedint next_entry; unsignedint count = 0; struct sk_buff *skb;
u32 rxstatus;
u16 pktlength;
u16 pktstatus;
/* Check for count < limit first as get_rx_status is changing
 * the response-fifo so we must process the next packet
 * after calling get_rx_status if a response is pending.
 * (reading the last byte of the response pops the value from the fifo.)
 */
while ((count < limit) &&
((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;
/* DMA transfer from TSE starts with 2 additional bytes for
 * IP payload alignment. Status returned by get_rx_status()
 * contains DMA transfer length. Packet is 2 bytes shorter.
 */
pktlength -= 2;
/* NOTE(review): from here on the lines belong to the ISR, not tse_rx */
if (likely(napi_schedule_prep(&priv->napi))) {
spin_lock(&priv->rxdma_irq_lock);
priv->dmaops->disable_rxirq(priv);
priv->dmaops->disable_txirq(priv);
spin_unlock(&priv->rxdma_irq_lock);
__napi_schedule(&priv->napi);
}
return IRQ_HANDLED;
}
/* Transmit a packet (called by the kernel). Dispatches
 * either the SGDMA method for transmitting or the
 * MSGDMA method, assumes no scatter/gather support,
 * implying an assumption that there's only one
 * physically contiguous fragment starting at
 * skb->data, for length of skb_headlen(skb).
 *
 * NOTE(review): truncated fragment.  It ends after selecting the TX ring
 * entry; the DMA mapping, descriptor submission, "out" label and closing
 * brace are missing, and the two csrwr32() lines at the bottom are a
 * MAC-address-programming fragment from a different function (they use
 * msb/lsb, which are not declared here).  Fused tokens ("unsignedint")
 * are compile errors.
 */
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ struct altera_tse_private *priv = netdev_priv(dev); unsignedint nopaged_len = skb_headlen(skb); unsignedint txsize = priv->tx_ring_size; int nfrags = skb_shinfo(skb)->nr_frags; struct tse_buffer *buffer = NULL;
netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr; unsignedint entry;
spin_lock_bh(&priv->tx_lock);
if (unlikely(tse_tx_avail(priv) < nfrags + 1)) { if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev); /* This is a hard error, log it. */
netdev_err(priv->dev, "%s: Tx list full when queue awake\n",
__func__);
}
ret = NETDEV_TX_BUSY; goto out;
}
/* Map the first skb fragment */
entry = priv->tx_prod % txsize;
buffer = &priv->tx_ring[entry];
/* Set primary MAC address
 * NOTE(review): these two lines belong to a different function
 * (MAC-address setter); msb/lsb are not declared in this scope.
 */
csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}
/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 *
 * Returns 0 when the core clears SW_RESET within the watchdog window,
 * -1 on timeout (in which case the reset request bit is cleared by hand).
 * (The original had the fused token "staticint" and several statements
 * collapsed onto single lines.)
 */
static int reset_mac(struct altera_tse_private *priv)
{
	int counter;
	u32 dat;

	/* Disable TX/RX and request a software + statistics-counter reset */
	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

	/* Poll for the core to self-clear SW_RESET, 1us per iteration */
	counter = 0;
	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
				     MAC_CMDCFG_SW_RESET))
			break;
		udelay(1);
	}

	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		/* Timed out: clear the reset request ourselves so the core
		 * is not left wedged.
		 */
		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
		dat &= ~MAC_CMDCFG_SW_RESET;
		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
		return -1;
	}
	return 0;
}
/* NOTE(review): orphaned fragment -- these statements sit at file scope
 * but belong inside a MAC-initialisation function.  The original comment
 * said "Disable RX/TX shift 16" yet the code *sets* the SHIFT16 bit
 * (i.e. enables the 16-bit alignment shift for received frames); confirm
 * intent against the complete driver.
 */
tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
/* Change the MTU
 *
 * NOTE(review): truncated fragment.  Only the "interface must be down"
 * guard survives; the assignment of the new MTU and the return are
 * missing.  The for-loop below (writing 1 to all 64 hash-table entries,
 * i.e. accepting all multicast) belongs to a different, filter-all
 * helper -- "i" and "priv" are not declared in this scope.  The fused
 * token "staticint" is a compile error.
 */
staticint tse_change_mtu(struct net_device *dev, int new_mtu)
{ if (netif_running(dev)) {
netdev_err(dev, "must be stopped to change its MTU\n"); return -EBUSY;
}
/* set the hash filter */
for (i = 0; i < 64; i++)
csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
/* Set or clear the multicast filter for this adapter (hash-filter
 * variant).  Promiscuous mode sets PROMIS_EN in the MAC command/config
 * register; otherwise the hash table is programmed to accept either all
 * multicast (IFF_ALLMULTI) or only the subscribed groups.  Serialised by
 * mac_cfg_lock.  (The original had the fused token "staticvoid".)
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if (dev->flags & IFF_PROMISC)
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);

	if (dev->flags & IFF_ALLMULTI)
		altera_tse_set_mcfilterall(dev);
	else
		altera_tse_set_mcfilter(dev);

	spin_unlock(&priv->mac_cfg_lock);
}
/* Set or clear the multicast filter for this adapter
 *
 * NOTE(review): truncated -- only the opening line survives; the body
 * and closing brace are missing.  The fused token "staticvoid" is a
 * compile error.
 */
staticvoid tse_set_rx_mode(struct net_device *dev)
{ struct altera_tse_private *priv = netdev_priv(dev);
/* Open and initialize the interface
 *
 * NOTE(review): truncated fragment.  The spin_unlock() after init_mac()
 * has no matching spin_lock() in view (the lock acquisition was lost in
 * extraction), the goto targets (phy_error, alloc_skbuf_error,
 * init_error, tx_request_irq_error) are not defined here, and the
 * function never closes.  Fused tokens ("staticint", "unsignedlong")
 * are compile errors.  Restore from the complete driver.
 */
staticint tse_open(struct net_device *dev)
{ struct altera_tse_private *priv = netdev_priv(dev); unsignedlong flags; int ret = 0; int i;
/* Reset and configure TSE MAC and probe associated PHY */
ret = priv->dmaops->init_dma(priv); if (ret != 0) {
netdev_err(dev, "Cannot initialize DMA\n"); goto phy_error;
}
if (netif_msg_ifup(priv))
netdev_warn(dev, "device MAC address %pM\n",
dev->dev_addr);
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
 * due to the PHY being put into isolation or power down mode.
 * This is not an error if reset fails due to no clock.
 */
if (ret)
netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
ret = init_mac(priv);
/* NOTE(review): unlock without a visible matching lock -- truncation */
spin_unlock(&priv->mac_cfg_lock); if (ret) {
netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret); goto alloc_skbuf_error;
}
priv->dmaops->reset_dma(priv);
/* Create and initialize the TX/RX descriptors chains. */
priv->rx_ring_size = dma_rx_num;
priv->tx_ring_size = dma_tx_num;
ret = alloc_init_skbufs(priv); if (ret) {
netdev_err(dev, "DMA descriptors initialization failed\n"); goto alloc_skbuf_error;
}
/* Register RX interrupt */
ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
dev->name, dev); if (ret) {
netdev_err(dev, "Unable to register RX interrupt %d\n",
priv->rx_irq); goto init_error;
}
/* Register TX interrupt */
ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
dev->name, dev); if (ret) {
netdev_err(dev, "Unable to register TX interrupt %d\n",
priv->tx_irq); goto tx_request_irq_error;
}
/* Stop TSE MAC interface and put the device in an inactive state
 *
 * NOTE(review): truncated fragment.  mac_cfg_lock and tx_lock are taken
 * but the matching unlocks, the NAPI/queue teardown and the closing
 * brace are missing.  Fused tokens ("staticint", "unsignedlongint") are
 * compile errors.
 */
staticint tse_shutdown(struct net_device *dev)
{ struct altera_tse_private *priv = netdev_priv(dev); unsignedlongint flags; int ret;
/* Free the IRQ lines */
free_irq(priv->rx_irq, dev);
free_irq(priv->tx_irq, dev);
/* disable and reset the MAC, empties fifo */
spin_lock(&priv->mac_cfg_lock);
spin_lock(&priv->tx_lock);
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
 * due to the PHY being put into isolation or power down mode.
 * This is not an error if reset fails due to no clock.
 */
if (ret)
netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
priv->dmaops->reset_dma(priv);
free_skbufs(dev);
/* NOTE(review): interior fragment of the platform-probe function (its
 * signature and local declarations -- pdev, dma_res, descmap,
 * control_port, pcs_res, pcs_regmap_cfg, mrc, pcs_regmap -- are outside
 * this chunk).  Maps the SGDMA or mSGDMA register/descriptor resources
 * and builds a regmap for the SGMII PCS.  The fused token "elseif" is a
 * compile error.
 */
if (priv->dmaops &&
priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) { /* Get the mapped address to the SGDMA descriptor memory */
ret = request_and_map(pdev, "s1", &dma_res, &descmap); if (ret) goto err_free_netdev;
/* Start of that memory is for transmit descriptors */
priv->tx_dma_desc = descmap;
/* First half is for tx descriptors, other half presumably for rx --
 * the original comment said "other half for tx", which looks like a
 * typo; confirm against the complete driver.
 */
priv->txdescmem = resource_size(dma_res)/2;
if (upper_32_bits(priv->rxdescmem_busaddr)) {
dev_dbg(priv->device, "SGDMA bus addresses greater than 32-bits\n");
ret = -EINVAL; goto err_free_netdev;
} if (upper_32_bits(priv->txdescmem_busaddr)) {
dev_dbg(priv->device, "SGDMA bus addresses greater than 32-bits\n");
ret = -EINVAL; goto err_free_netdev;
}
} elseif (priv->dmaops &&
priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
ret = request_and_map(pdev, "rx_resp", &dma_res,
&priv->rx_dma_resp); if (ret) goto err_free_netdev;
ret = request_and_map(pdev, "tx_desc", &dma_res,
&priv->tx_dma_desc); if (ret) goto err_free_netdev;
/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,
(void __iomem **)&priv->mac_dev); if (ret) goto err_free_netdev;
/* xSGDMA Rx Dispatcher address space */
ret = request_and_map(pdev, "rx_csr", &dma_res,
&priv->rx_dma_csr); if (ret) goto err_free_netdev;
/* xSGDMA Tx Dispatcher address space */
ret = request_and_map(pdev, "tx_csr", &dma_res,
&priv->tx_dma_csr); if (ret) goto err_free_netdev;
memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
memset(&mrc, 0, sizeof(mrc));
/* SGMII PCS address space. The location can vary depending on how the
 * IP is integrated. We can have a resource dedicated to it at a specific
 * address space, but if it's not the case, we fallback to the mdiophy0
 * from the MAC's address space
 */
ret = request_and_map(pdev, "pcs", &pcs_res, &priv->pcs_base); if (ret) {
/* If we can't find a dedicated resource for the PCS, fallback
 * to the internal PCS, that has a different address stride
 */
priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
pcs_regmap_cfg.reg_bits = 32; /* Values are MDIO-like values, on 16 bits */
pcs_regmap_cfg.val_bits = 16;
pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(2);
} else {
pcs_regmap_cfg.reg_bits = 16;
pcs_regmap_cfg.val_bits = 16;
pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
}
/* Create a regmap for the PCS so that it can be used by the PCS driver */
pcs_regmap = devm_regmap_init_mmio(&pdev->dev, priv->pcs_base,
&pcs_regmap_cfg); if (IS_ERR(pcs_regmap)) {
ret = PTR_ERR(pcs_regmap); goto err_free_netdev;
}
mrc.regmap = pcs_regmap;
mrc.parent = &pdev->dev;
mrc.valid_addr = 0x0;
mrc.autoscan = false;
/* NOTE(review): interior fragment of the platform-probe function,
 * continued: reads device-tree properties, configures netdev features
 * and registers the net device.  The goto targets (err_free_netdev,
 * err_register_netdev) and the function's open/close braces are outside
 * this chunk.
 */
/* get FIFO depths from device tree */
if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
&priv->rx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
ret = -ENXIO; goto err_free_netdev;
}
if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
&priv->tx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
ret = -ENXIO; goto err_free_netdev;
}
/* get hash filter settings for this instance */
priv->hash_filter =
of_property_read_bool(pdev->dev.of_node, "altr,has-hash-multicast-filter");
/* Set hash filter to not set for now until the
 * multicast filter receive issue is debugged
 */
priv->hash_filter = 0;
/* get supplemental address settings for this instance */
priv->added_unicast =
of_property_read_bool(pdev->dev.of_node, "altr,has-supplementary-unicast");
priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN; /* Max MTU is 1500, ETH_DATA_LEN */
priv->dev->max_mtu = ETH_DATA_LEN;
/* Get the max mtu from the device tree. Note that the
 * "max-frame-size" parameter is actually max mtu. Definition
 * in the ePAPR v1.1 spec and usage differ, so go with usage.
 */
of_property_read_u32(pdev->dev.of_node, "max-frame-size",
&priv->dev->max_mtu);
/* The DMA buffer size already accounts for an alignment bias
 * to avoid unaligned access exceptions for the NIOS processor,
 */
priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
/* get default MAC address from device tree */
ret = of_get_ethdev_address(pdev->dev.of_node, ndev); if (ret)
eth_hw_addr_random(ndev);
/* get phy addr and create mdio */
ret = altera_tse_phy_get_addr_mdio_create(ndev);
/* use the hash-filter rx-mode handler when the IP has the filter */
if (priv->hash_filter)
altera_tse_netdev_ops.ndo_set_rx_mode =
tse_set_rx_mode_hashfilter;
/* Scatter/gather IO is not supported,
 * so it is turned off
 */
ndev->hw_features &= ~NETIF_F_SG;
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
/* VLAN offloading of tagging, stripping and filtering is not
 * supported by hardware, but driver will accommodate the
 * extra 4-byte VLAN tag for processing by upper layers
 */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
/* setup NAPI interface */
netif_napi_add(ndev, &priv->napi, tse_poll);
netif_carrier_off(ndev);
ret = register_netdev(ndev); if (ret) {
dev_err(&pdev->dev, "failed to register TSE net device\n"); goto err_register_netdev;
}
/* NOTE(review): the following German web-page disclaimer was accidentally
 * appended to the source during extraction; it is not part of the driver
 * and has been fenced into a comment so the file remains parseable.
 * Translation: "The information on this website was compiled carefully to
 * the best of our knowledge.  However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.  Note: the
 * colored syntax rendering and the measurement are still experimental."
 */