// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Zhangfei Gao <zgao6@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 */
/*
 * Transmit DMA descriptor as consumed by the PXA168 Ethernet controller.
 * The field order and widths are hardware-defined (the controller reads
 * this layout directly from DMA-coherent memory), so members must not be
 * reordered or resized.
 */
struct tx_desc {
u32 cmd_sts; /* Command/status field */
u16 reserved;
u16 byte_cnt; /* buffer byte count */
u32 buf_ptr; /* pointer to buffer for this descriptor */
u32 next_desc_ptr; /* Pointer to next descriptor */
};
/*
 * Per-port private driver state, stored in netdev_priv().
 *
 * NOTE(review): this definition is truncated in this chunk — the closing
 * brace and the remaining members (e.g. the address hash table pointer
 * 'htpr' and its DMA handle 'htpr_dma', both used by init_hash_table()
 * below) are missing from the visible text.  Restore them from the
 * upstream driver before building.
 */
struct pxa168_eth_private { struct platform_device *pdev; int port_num; /* User Ethernet port number */ int phy_addr; int phy_speed; int phy_duplex;
phy_interface_t phy_intf;
int rx_resource_err; /* Rx ring resource error flag */
/* Next available and first returning Rx resource */ int rx_curr_desc_q, rx_used_desc_q;
/* Next available and first returning Tx resource */ int tx_curr_desc_q, tx_used_desc_q;
struct rx_desc *p_rx_desc_area;
dma_addr_t rx_desc_dma; int rx_desc_area_size; struct sk_buff **rx_skb;
struct tx_desc *p_tx_desc_area;
dma_addr_t tx_desc_dma; int tx_desc_area_size; struct sk_buff **tx_skb;
struct work_struct tx_timeout_task;
struct net_device *dev; struct napi_struct napi;
u8 work_todo; int skb_size;
/* Size of Tx Ring per queue */ int tx_ring_size; /* Number of tx descriptors in use */ int tx_desc_count; /* Size of Rx Ring per queue */ int rx_ring_size; /* Number of rx descriptors in use */ int rx_desc_count;
/*
 * Used in case RX Ring is empty, which can occur when
 * system does not have resources (skb's)
 */
struct timer_list timeout; struct mii_bus *smi_bus;
/*
 * NOTE(review): orphaned fragment — this is the tail of what appears to be
 * the RX-ring refill routine; its function header, local declarations and
 * the skb-allocation loop above this point are missing from this chunk.
 */
/* Return the descriptor to DMA ownership */
dma_wmb();
/* Barriers bracket the ownership handoff so the descriptor contents are
 * visible to the device before (and not reordered past) the cmd_sts write. */
p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
dma_wmb();
/* Move the used descriptor pointer to the next descriptor */
pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
/* Any Rx return cancels the Rx resource error status */
pep->rx_resource_err = 0;
skb_reserve(skb, ETH_HW_IP_ALIGN);
}
/*
 * If RX ring is empty of SKB, set a timer to try allocating
 * again at a later time.
 */
if (pep->rx_desc_count == 0) {
pep->timeout.expires = jiffies + (HZ / 10);
add_timer(&pep->timeout);
}
}
/*
 * ----------------------------------------------------------------------------
 * This function will add/del an entry to the address table.
 * Inputs
 * pep - ETHERNET port private data.
 * mac_addr - MAC address.
 * skip - if 1, skip this address.  Used in case of deleting an entry which
 *        is part of a chain in the hash table.  We can't just delete the
 *        entry since that would break the chain; tables are defragmented
 *        from time to time instead.
 * rd - 0 Discard packet upon match.
 *    - 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 *
 * NOTE(review): this chunk is truncated/garbled:
 *  - 'new_low' and 'new_high' are read in the search loop below but are
 *    never assigned here; upstream computes them from 'mac_addr' (plus the
 *    skip/rd/valid flag bits) before the loop — those lines are missing.
 *  - the function tail (writing or clearing the selected entry, the final
 *    'return 0;' and the closing brace) is also missing.
 *  - 'staticint'/'constunsignedchar' are fused tokens from a bad extraction.
 */
staticint add_del_hash_entry(struct pxa168_eth_private *pep, constunsignedchar *mac_addr,
u32 rd, u32 skip, int del)
{ struct addr_table_entry *entry, *start;
u32 new_high;
u32 new_low;
u32 i;
/*
 * Pick the appropriate table, start scanning for free/reusable
 * entries at the index obtained by hashing the specified MAC address
 */
start = pep->htpr;
entry = start + hash_function(mac_addr); for (i = 0; i < HOP_NUMBER; i++) { if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { break;
} else { /* if same address put in same position */ if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
(new_low & 0xfffffff8)) &&
(le32_to_cpu(entry->hi) == new_high)) { break;
}
/* Wrap around at the end of the 1/2K-entry table */
} if (entry == start + 0x7ff)
entry = start; else
entry++;
}
if (i == HOP_NUMBER) { if (!del) {
netdev_info(pep->dev, "%s: table section is full, need to " "move to 16kB implementation?\n",
__FILE__); return -ENOSPC;
} else return 0;
}
/* * ---------------------------------------------------------------------------- * Create an addressTable entry from MAC address info * found in the specifed net_device struct * * Input : pointer to ethernet interface network device structure * Output : N/A
*/ staticvoid update_hash_table_mac_address(struct pxa168_eth_private *pep, unsignedchar *oaddr, constunsignedchar *addr)
{ /* Delete old entry */ if (oaddr)
add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); /* Add new entry */
add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}
/*
 * init_hash_table - allocate (or clear) the address hash table.
 *
 * Hardware expects the CPU to build a hash table based on a predefined
 * hash function and populate it based on hardware address.  The location
 * of the hash table is identified by a 32-bit pointer stored in the HTPR
 * internal register.  Two possible sizes exist for the hash table:
 * 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB (16kB of DRAM
 * required (4 x 4 kB banks)).  We currently only support the 1/2kB table.
 *
 * TODO: Add support for the 8kB hash table and an alternative hash
 * function.  The driver can dynamically switch to them if the 1/2kB hash
 * table is full.
 *
 * Returns 0 on success, -ENOMEM if the DMA-coherent table cannot be
 * allocated.
 */
static int init_hash_table(struct pxa168_eth_private *pep)
{
	if (!pep->htpr) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (!pep->htpr)
			return -ENOMEM;
	} else {
		/* Table already allocated (e.g. re-open): just wipe it */
		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	}
	/* Tell the hardware where the table lives */
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}
/*
 * NOTE(review): two orphaned fragments follow.  The lines up to the
 * update_hash_table_mac_address() call are the middle of the RX-mode
 * setup path (promiscuous bit + address-table rebuild); the remainder
 * (interrupt masking, DMA abort, port disable, phy_stop) is the tail of
 * the device-stop path.  Both function headers are missing from this chunk.
 */
val = rdl(pep, PORT_CONFIG); if (dev->flags & IFF_PROMISC)
val |= PCR_PM; else
val &= ~PCR_PM;
wrl(pep, PORT_CONFIG, val);
/*
 * Remove the old list of MAC address and add dev->addr
 * and multicast address.
 */
memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
update_hash_table_mac_address(pep, NULL, dev->dev_addr);
/* Stop all interrupts for receive, transmit and error. */
wrl(pep, INT_MASK, 0);
/* Clear all interrupts */
wrl(pep, INT_CAUSE, 0);
/* Stop RX DMA */
val = rdl(pep, SDMA_CMD);
val &= ~SDMA_CMD_ERD; /* abort dma command */
/* NOTE(review): 'val' is computed here but never written back to SDMA_CMD;
 * abort_dma() below performs the actual stop — confirm against upstream. */
/* Abort any transmit and receive operations and put DMA
 * in idle state.
 */
abort_dma(pep);
/* Disable port */
val = rdl(pep, PORT_CONFIG);
val &= ~PCR_EN;
wrl(pep, PORT_CONFIG, val);
phy_stop(dev->phydev);
}
/*
 * txq_reclaim - Free the tx desc data for completed descriptors.
 * If force is non-zero, frees uncompleted descriptors as well.
 *
 * NOTE(review): the body below does NOT match this signature — it is
 * clearly the RX-ring processing loop from a different function (it uses
 * 'budget', 'received_packets' and 'stats', none of which are declared
 * here, and 'rx_next_curr_desc' is read before ever being assigned).
 * The real txq_reclaim() body and the rx-processing function header were
 * lost in extraction; restore both from the upstream driver.
 */
staticint txq_reclaim(struct net_device *dev, int force)
{ struct pxa168_eth_private *pep = netdev_priv(dev); struct tx_desc *desc;
u32 cmd_sts; struct sk_buff *skb; int tx_index;
dma_addr_t addr; int count; int released = 0;
while (budget-- > 0) { int rx_next_curr_desc, rx_curr_desc, rx_used_desc; struct rx_desc *rx_desc; unsignedint cmd_sts;
/* Do not process Rx ring in case of Rx ring resource error */ if (pep->rx_resource_err) break;
rx_curr_desc = pep->rx_curr_desc_q;
rx_used_desc = pep->rx_used_desc_q;
rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
cmd_sts = rx_desc->cmd_sts;
/* Read cmd_sts before the rest of the descriptor (dma_rmb). */
dma_rmb(); if (cmd_sts & (BUF_OWNED_BY_DMA)) break;
skb = pep->rx_skb[rx_curr_desc];
pep->rx_skb[rx_curr_desc] = NULL;
/* NOTE(review): 'rx_next_curr_desc' is uninitialized here — the line that
 * advances pep->rx_curr_desc_q and computes it is missing. */
/* Rx descriptors exhausted. */ /* Set the Rx ring resource error flag */ if (rx_next_curr_desc == rx_used_desc)
pep->rx_resource_err = 1;
pep->rx_desc_count--;
dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
rx_desc->buf_size,
DMA_FROM_DEVICE);
received_packets++; /*
 * Update statistics.
 * Note byte count includes 4 byte CRC count
 */
stats->rx_packets++;
stats->rx_bytes += rx_desc->byte_cnt; /*
 * In case received a packet without first / last bits on OR
 * the error summary bit is on, the packet needs to be dropped.
 */ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
(RX_FIRST_DESC | RX_LAST_DESC))
|| (cmd_sts & RX_ERROR)) {
stats->rx_dropped++; if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
(RX_FIRST_DESC | RX_LAST_DESC)) { if (net_ratelimit())
netdev_err(dev, "Rx pkt on multiple desc\n");
} if (cmd_sts & RX_ERROR)
stats->rx_errors++;
dev_kfree_skb_irq(skb);
} else { /*
 * The -4 is for the CRC in the trailer of the
 * received packet
 */
skb_put(skb, rx_desc->byte_cnt - 4);
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
}
} /* Fill RX ring with skb's */
rxq_refill(dev); return received_packets;
}
/*
 * pxa168_eth_collect_events - read and acknowledge interrupt causes.
 * @pep: port private data
 * @dev: network device (context only)
 *
 * Returns IRQ_NONE (0) when no cause bit was set; otherwise returns 1
 * when there is RX or TX-done work to do.  TX-done work is additionally
 * recorded in pep->work_todo for the reclaim path.
 */
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	/* Acknowledge (clear) exactly the causes we observed */
	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;

	return ret;
}
/*
 * pxa168_eth_recalc_skb_size - recompute the RX skb allocation size
 * from the current MTU and store it in pep->skb_size.
 */
static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary. If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;
}
/*
 * set_port_config_ext - program the extended port configuration register.
 *
 * NOTE(review): this chunk is garbled — 'skb_size' is OR-ed into the
 * register below while uninitialized (upstream derives a PCXR_MFL_* frame
 * length field from pep->skb_size first; those lines are missing), and the
 * stop/re-open sequence from line "pxa168_eth_stop(dev)" onward belongs to
 * the tail of the MTU-change handler, not to this function.
 */
staticint set_port_config_ext(struct pxa168_eth_private *pep)
{ int skb_size;
/* Extended Port Configuration */
wrl(pep, PORT_CONFIG_EXT,
PCXR_AN_SPEED_DIS | /* Disable HW AN */
PCXR_AN_DUPLEX_DIS |
PCXR_AN_FLOWCTL_DIS |
PCXR_2BSM | /* Two byte prefix aligns IP hdr */
PCXR_DSCP_EN | /* Enable DSCP in IP */
skb_size | PCXR_FLP | /* do not force link pass */
PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
/*
 * Stop and then re-open the interface. This will allocate RX
 * skbs of the new MTU.
 * There is a possible danger that the open will not succeed,
 * due to memory being full.
 */
pxa168_eth_stop(dev); if (pxa168_eth_open(dev)) {
dev_err(&dev->dev, "fatal error on re-opening device after MTU change\n");
}
return 0;
}
/*
 * eth_alloc_tx_desc_index - reserve the next TX descriptor slot.
 *
 * NOTE(review): the body below does not belong to this signature — it is
 * spliced together from the NAPI poll routine (txq_reclaim/netif_wake_queue/
 * rxq_process/napi_complete_done, using undeclared 'dev', 'work_done',
 * 'budget', 'napi') and the tail of the transmit path (stats update,
 * netif_stop_queue, NETDEV_TX_OK).  Restore all three functions from the
 * upstream driver.
 */
staticint eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{ int tx_desc_curr;
/*
 * We call txq_reclaim every time since in NAPI interrupts are disabled
 * and due to this we miss the TX_DONE interrupt, which is not updated
 * in interrupt status register.
 */
txq_reclaim(dev, 0); if (netif_queue_stopped(dev)
&& pep->tx_ring_size - pep->tx_desc_count > 1) {
netif_wake_queue(dev);
}
work_done = rxq_process(dev, budget); if (work_done < budget) {
napi_complete_done(napi, work_done);
/* Re-enable interrupts once the budget is not exhausted */
wrl(pep, INT_MASK, ALL_INTS);
}
stats->tx_bytes += length;
stats->tx_packets++;
netif_trans_update(dev); if (pep->tx_ring_size - pep->tx_desc_count <= 1) { /* We handled the current skb, but now we are out of space.*/
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
}
/*
 * smi_wait_ready - poll until the SMI (MDIO) unit is no longer busy.
 * @pep: port private data
 *
 * Sleeps 10 ms between polls.  Returns 0 when the unit is ready, or
 * -ETIMEDOUT after PHY_WAIT_ITERATIONS unsuccessful polls.
 */
static int smi_wait_ready(struct pxa168_eth_private *pep)
{
	int i = 0;

	/* wait for the SMI register to become available */
	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		msleep(10);
	}

	return 0;
}
/*
 * pxa168_smi_read - MDIO bus read callback.
 * @bus: MII bus whose priv points at our pxa168_eth_private
 * @phy_addr: PHY address on the bus
 * @regnum: PHY register number
 *
 * Returns the 16-bit register value on success, -ETIMEDOUT if the SMI
 * unit stays busy, or -ENODEV if no valid read data arrives in time.
 */
static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	/* Issue the read command */
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			netdev_warn(pep->dev, "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}
/*
 * pxa168_smi_write - MDIO bus write callback.
 *
 * NOTE(review): this function is truncated — after the busy check its
 * actual SMI write command and completion wait are missing.  The code from
 * the devm_clk_get_enabled() call onward belongs to the probe function
 * (clock acquisition, netdev allocation, MAC address discovery), whose
 * header is also missing from this chunk.
 */
staticint pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
u16 value)
{ struct pxa168_eth_private *pep = bus->priv;
if (smi_wait_ready(pep)) {
netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT;
}
clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n"); return -ENODEV;
}
dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); if (!dev) return -ENOMEM;
err = of_get_ethdev_address(pdev->dev.of_node, dev); if (err) {
u8 addr[ETH_ALEN];
/* try reading the mac address, if set by the bootloader */
pxa168_eth_get_mac_address(dev, addr); if (is_valid_ether_addr(addr)) {
eth_hw_addr_set(dev, addr);
} else {
dev_info(&pdev->dev, "Using random mac address\n");
eth_hw_addr_random(dev);
}
}
/*
 * NOTE(review): the following text is extraction residue from a web page
 * and is not part of the driver source.  Preserved here (commented out so
 * it cannot break compilation); English translation: "The information on
 * this website was compiled carefully to the best of our knowledge.
 * However, neither completeness, correctness, nor quality of the provided
 * information is guaranteed.  Remark: the colored syntax highlighting and
 * the measurement are still experimental."
 *
 * Original: "Die Informationen auf dieser Webseite wurden nach bestem
 * Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert. Bemerkung: Die farbliche Syntaxdarstellung und die Messung
 * sind noch experimentell."
 */