// SPDX-License-Identifier: GPL-2.0-only /* * Xilinx Axi Ethernet device driver * * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> * Copyright (c) 2010 - 2011 PetaLogix * Copyright (c) 2019 - 2022 Calian Advanced Technologies * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. * * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 * and Spartan6. * * TODO: * - Add Axi Fifo support. * - Factor out Axi DMA code into separate driver. * - Test and fix basic multicast filtering. * - Add support for extended multicast filtering. * - Test basic VLAN support. * - Add support for extended VLAN support.
*/
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */ #define DRIVER_NAME "xaxienet" #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" #define DRIVER_VERSION "1.00a"
/** * axienet_dma_in32 - Memory mapped Axi DMA register read * @lp: Pointer to axienet local structure * @reg: Address offset from the base address of the Axi DMA core * * Return: The contents of the Axi DMA register * * This function returns the contents of the corresponding Axi DMA register.
*/ staticinline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{ return ioread32(lp->dma_regs + reg);
}
if (lp->features & XAE_FEATURE_DMA_64BIT)
ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
return ret;
}
/** * axienet_dma_bd_release - Release buffer descriptor rings * @ndev: Pointer to the net_device structure * * This function is used to release the descriptors allocated in * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet * driver stop api is called.
*/ staticvoid axienet_dma_bd_release(struct net_device *ndev)
{ int i; struct axienet_local *lp = netdev_priv(ndev);
/* If we end up here, tx_bd_v must have been DMA allocated. */
dma_free_coherent(lp->dev, sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
if (!lp->rx_bd_v) return;
for (i = 0; i < lp->rx_bd_num; i++) {
dma_addr_t phys;
/* A NULL skb means this descriptor has not been initialised * at all.
*/ if (!lp->rx_bd_v[i].skb) break;
dev_kfree_skb(lp->rx_bd_v[i].skb);
/* For each descriptor, we programmed cntrl with the (non-zero) * descriptor size, after it had been successfully allocated. * So a non-zero value in there means we need to unmap it.
*/ if (lp->rx_bd_v[i].cntrl) {
phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
dma_unmap_single(lp->dev, phys,
lp->max_frm_size, DMA_FROM_DEVICE);
}
}
/* Return the DMA/SG clock rate in Hz, falling back to a fixed guess when
 * no AXI clock was provided.
 */
static u64 axienet_dma_rate(struct axienet_local *lp)
{
	if (!lp->axi_clk)
		return 125000000; /* arbitrary guess if no clock rate set */

	return clk_get_rate(lp->axi_clk);
}
/** * axienet_calc_cr() - Calculate control register value * @lp: Device private data * @count: Number of completions before an interrupt * @usec: Microseconds after the last completion before an interrupt * * Calculate a control register value based on the coalescing settings. The * run/stop bit is not set.
*/ static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
u32 cr;
cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
XAXIDMA_IRQ_ERROR_MASK; /* Only set interrupt delay timer if not generating an interrupt on * the first packet. Otherwise leave at 0 to disable delay interrupt.
*/ if (count > 1) {
u64 clk_rate = axienet_dma_rate(lp);
u32 timer;
/* 1 Timeout Interval = 125 * (clock period of SG clock) */
timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
XAXIDMA_DELAY_SCALE);
/** * axienet_coalesce_params() - Extract coalesce parameters from the CR * @lp: Device private data * @cr: The control register to parse * @count: Number of packets before an interrupt * @usec: Idle time (in usec) before an interrupt
*/ staticvoid axienet_coalesce_params(struct axienet_local *lp, u32 cr,
u32 *count, u32 *usec)
{
u64 clk_rate = axienet_dma_rate(lp);
u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp:		Pointer to the axienet_local structure
 *
 * Programs the buffer descriptor base/tail pointers and sets the run/stop
 * bit for both the Rx and Tx channels. Each channel's control register is
 * protected by its own spinlock, so the Rx section is finished and its lock
 * dropped before the Tx lock is taken.
 *
 * Bug fix: the original code acquired @rx_cr_lock at the top but released
 * @tx_cr_lock at the bottom, leaving the Rx lock held forever and unlocking
 * a Tx lock that was never taken. The Rx lock is now released after the Rx
 * channel is started, and the Tx lock is acquired before the Tx section.
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	spin_lock_irq(&lp->rx_cr_lock);

	/* Start updating the Rx channel control register */
	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
	lp->rx_dma_started = true;

	spin_unlock_irq(&lp->rx_cr_lock);
	spin_lock_irq(&lp->tx_cr_lock);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	lp->tx_dma_started = true;

	spin_unlock_irq(&lp->tx_cr_lock);
}
/** * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA * @ndev: Pointer to the net_device structure * * Return: 0, on success -ENOMEM, on failure * * This function is called to initialize the Rx and Tx DMA descriptor * rings. This initializes the descriptors with required default values * and is called when Axi Ethernet driver reset is called.
*/ staticint axienet_dma_bd_init(struct net_device *ndev)
{ int i; struct sk_buff *skb; struct axienet_local *lp = netdev_priv(ndev);
/* Reset the indexes which are used for accessing the BDs */
lp->tx_bd_ci = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
lp->tx_bd_v = dma_alloc_coherent(lp->dev, sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) return -ENOMEM;
/** * axienet_set_mac_address - Write the MAC address * @ndev: Pointer to the net_device structure * @address: 6 byte Address to be written as MAC address * * This function is called to initialize the MAC address of the Axi Ethernet * core. It writes to the UAW0 and UAW1 registers of the core.
*/ staticvoid axienet_set_mac_address(struct net_device *ndev, constvoid *address)
{ struct axienet_local *lp = netdev_priv(ndev);
if (address)
eth_hw_addr_set(ndev, address); if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
(ndev->dev_addr[1] << 8) |
(ndev->dev_addr[2] << 16) |
(ndev->dev_addr[3] << 24));
axienet_iow(lp, XAE_UAW1_OFFSET,
(((axienet_ior(lp, XAE_UAW1_OFFSET)) &
~XAE_UAW1_UNICASTADDR_MASK) |
(ndev->dev_addr[4] |
(ndev->dev_addr[5] << 8))));
}
/** * netdev_set_mac_address - Write the MAC address (from outside the driver) * @ndev: Pointer to the net_device structure * @p: 6 byte Address to be written as MAC address * * Return: 0 for all conditions. Presently, there is no failure case. * * This function is called to initialize the MAC address of the Axi Ethernet * core. It calls the core specific axienet_set_mac_address. This is the * function that goes into net_device_ops structure entry ndo_set_mac_address.
*/ staticint netdev_set_mac_address(struct net_device *ndev, void *p)
{ struct sockaddr *addr = p;
/** * axienet_set_multicast_list - Prepare the multicast table * @ndev: Pointer to the net_device structure * * This function is called to initialize the multicast table during * initialization. The Axi Ethernet basic multicast support has a four-entry * multicast table which is initialized here. Additionally this function * goes into the net_device_ops structure entry ndo_set_multicast_list. This * means whenever the multicast table entries need to be updated this * function gets called.
*/ staticvoid axienet_set_multicast_list(struct net_device *ndev)
{ int i = 0;
u32 reg, af0reg, af1reg; struct axienet_local *lp = netdev_priv(ndev);
/** * axienet_setoptions - Set an Axi Ethernet option * @ndev: Pointer to the net_device structure * @options: Option to be enabled/disabled * * The Axi Ethernet core has multiple features which can be selectively turned * on or off. The typical options could be jumbo frame option, basic VLAN * option, promiscuous mode option etc. This function is used to set or clear * these options in the Axi Ethernet hardware. This is done through * axienet_option structure .
*/ staticvoid axienet_setoptions(struct net_device *ndev, u32 options)
{ int reg; struct axienet_local *lp = netdev_priv(ndev); struct axienet_option *tp = &axienet_options[0];
/* Just less than 2^32 bytes at 2.5 GBit/s */
schedule_delayed_work(&lp->stats_work, 13 * HZ);
}
staticint __axienet_device_reset(struct axienet_local *lp)
{
u32 value; int ret;
/* Save statistics counters in case they will be reset */
mutex_lock(&lp->stats_lock); if (lp->features & XAE_FEATURE_STATS)
axienet_stats_update(lp, true);
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending * commands/transfers will be flushed or completed during this * reset process. * Note that even though both TX and RX have their own reset register, * they both reset the entire DMA core, so only one needs to be used.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
ret = read_poll_timeout(axienet_dma_in32, value,
!(value & XAXIDMA_CR_RESET_MASK),
DELAY_OF_ONE_MILLISEC, 50000, false, lp,
XAXIDMA_TX_CR_OFFSET); if (ret) {
dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); goto out;
}
/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
ret = read_poll_timeout(axienet_ior, value,
value & XAE_INT_PHYRSTCMPLT_MASK,
DELAY_OF_ONE_MILLISEC, 50000, false, lp,
XAE_IS_OFFSET); if (ret) {
dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); goto out;
}
/* Update statistics counters with new values */ if (lp->features & XAE_FEATURE_STATS) { enum temac_stat stat;
write_seqcount_begin(&lp->hw_stats_seqcount);
lp->reset_in_progress = false; for (stat = 0; stat < STAT_COUNT; stat++) {
u32 counter =
axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
/* Give DMAs a chance to halt gracefully */
sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
msleep(20);
sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
}
sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
msleep(20);
sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
}
/* Do a reset to ensure DMA is really stopped */
axienet_lock_mii(lp);
__axienet_device_reset(lp);
axienet_unlock_mii(lp);
}
/** * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. * @ndev: Pointer to the net_device structure * * This function is called to reset and initialize the Axi Ethernet core. This * is typically called during initialization. It does a reset of the Axi DMA * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines * are connected to Axi Ethernet reset lines, this in turn resets the Axi * Ethernet core. No separate hardware reset is done for the Axi Ethernet * core. * Returns 0 on success or a negative error number otherwise.
 * NOTE(review): the body visible here performs no reset at all -- it only
 * re-applies options, MAC address, and multicast filters, and it always
 * returns 0. The declared 'axienet_status' and 'ret' variables are unused.
 * The actual reset/BD-init sequence described above appears to be missing
 * from this view; confirm against the complete driver source.
*/ staticint axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status; struct axienet_local *lp = netdev_priv(ndev); int ret;
/* Sync default options with HW but leave receiver and * transmitter disabled.
*/
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
/* Program address filters while RX/TX are still disabled. */
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
/* Re-apply the full option set, re-enabling receiver/transmitter. */
axienet_setoptions(ndev, lp->options);
/* Refresh the TX watchdog timestamp so no spurious TX timeout fires
 * right after (re)initialization.
 */
netif_trans_update(ndev);
return 0;
}
/** * axienet_free_tx_chain - Clean up a series of linked TX descriptors. * @lp: Pointer to the axienet_local structure * @first_bd: Index of first descriptor to clean up * @nr_bds: Max number of descriptors to clean up * @force: Whether to clean descriptors even if not complete * @sizep: Pointer to a u32 filled with the total sum of all bytes * in all cleaned-up descriptors. Ignored if NULL. * @budget: NAPI budget (use 0 when not called from NAPI poll) * * Would either be called after a successful transmit operation, or after * there was an error when setting up the chain. * Returns the number of packets handled.
 * NOTE(review): the loop below never increments 'packets' (so 0 is always
 * returned despite the documented contract), never frees cur_p->skb, and
 * never clears cur_p->cntrl/status to return the descriptor to the ring;
 * 'budget' is also unused. Parts of the per-descriptor cleanup appear to
 * be missing from this view -- verify against the complete driver source.
*/ staticint axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, int nr_bds, bool force, u32 *sizep, int budget)
{ struct axidma_bd *cur_p; unsignedint status; int i, packets = 0;
dma_addr_t phys;
for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
status = cur_p->status;
/* If force is not specified, clean up only descriptors * that have been completed by the MAC.
*/ if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) break;
/* Ensure we see complete descriptor update */
dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
/* Unmap the payload buffer; its length was stored in cntrl. */
dma_unmap_single(lp->dev, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
/* Accumulate the MAC-reported transmitted byte count if requested. */
if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
/* Advance the consumer index only on the normal (non-forced) path. */
if (!force) {
lp->tx_bd_ci += i; if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci %= lp->tx_bd_num;
}
return packets;
}
/** * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy * @lp: Pointer to the axienet_local structure * @num_frag: The number of BDs to check for * * Return: 0, on success * NETDEV_TX_BUSY, if any of the descriptors are not free * * This function is invoked before BDs are allocated and transmission starts. * This function returns 0 if a BD or group of BDs can be allocated for * transmission. If the BD or any of the BDs are not free the function * returns a busy status.
*/ staticinlineint axienet_check_tx_bd_space(struct axienet_local *lp, int num_frag)
{ struct axidma_bd *cur_p;
/* Ensure we see all descriptor updates from device or TX polling */
rmb();
cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
lp->tx_bd_num]; if (cur_p->cntrl) return NETDEV_TX_BUSY; return 0;
}
/** * axienet_dma_tx_cb - DMA engine callback for TX channel. * @data: Pointer to the axienet_local structure. * @result: error reporting through dmaengine_result. * This function is called by dmaengine driver for TX channel to notify * that the transmit is done.
*/ staticvoid axienet_dma_tx_cb(void *data, conststruct dmaengine_result *result)
{ struct skbuf_dma_descriptor *skbuf_dma; struct axienet_local *lp = data; struct netdev_queue *txq; int len;
/** * axienet_start_xmit_dmaengine - Starts the transmission. * @skb: sk_buff pointer that contains data to be Txed. * @ndev: Pointer to net_device structure. * * Return: NETDEV_TX_OK on success or any non space errors. * NETDEV_TX_BUSY when free element in TX skb ring buffer * is not available. * * This function is invoked to initiate transmission. The * function sets the skbs, register dma callback API and submit * the dma transaction. * Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values.
*/ static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{ struct dma_async_tx_descriptor *dma_tx_desc = NULL; struct axienet_local *lp = netdev_priv(ndev);
u32 app_metadata[DMA_NUM_APP_WORDS] = {0}; struct skbuf_dma_descriptor *skbuf_dma; struct dma_device *dma_dev; struct netdev_queue *txq;
u32 csum_start_off;
u32 csum_index_off; int sg_len; int ret;
dma_dev = lp->tx_chan->device;
sg_len = skb_shinfo(skb)->nr_frags + 1; if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
netif_stop_queue(ndev); if (net_ratelimit())
netdev_warn(ndev, "TX ring unexpectedly full\n"); return NETDEV_TX_BUSY;
}
skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head); if (!skbuf_dma) goto xmit_error_drop_skb;
lp->tx_ring_head++;
sg_init_table(skbuf_dma->sgl, sg_len);
ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len); if (ret < 0) goto xmit_error_drop_skb;
ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); if (!ret) goto xmit_error_drop_skb;
/** * axienet_tx_poll - Invoked once a transmit is completed by the * Axi DMA Tx channel. * @napi: Pointer to NAPI structure. * @budget: Max number of TX packets to process. * * Return: Number of TX packets processed. * * This function is invoked from the NAPI processing to notify the completion * of transmit operation. It clears fields in the corresponding Tx BDs and * unmaps the corresponding buffer so that CPU can regain ownership of the * buffer. It finally invokes "netif_wake_queue" to restart transmission if * required.
*/ staticint axienet_tx_poll(struct napi_struct *napi, int budget)
{ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); struct net_device *ndev = lp->ndev;
u32 size = 0; int packets;
/* Matches barrier in axienet_start_xmit */
smp_mb();
if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
netif_wake_queue(ndev);
}
if (packets < budget && napi_complete_done(napi, packets)) { /* Re-enable TX completion interrupts. This should * cause an immediate interrupt if any TX packets are * already pending.
*/
spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
spin_unlock_irq(&lp->tx_cr_lock);
} return packets;
}
/** * axienet_start_xmit - Starts the transmission. * @skb: sk_buff pointer that contains data to be Txed. * @ndev: Pointer to net_device structure. * * Return: NETDEV_TX_OK, on success * NETDEV_TX_BUSY, if any of the descriptors are not free * * This function is invoked from upper layers to initiate transmission. The * function uses the next available free BDs and populates their fields to * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values.
*/ static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
u32 ii;
u32 num_frag;
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
dma_addr_t tail_p, phys;
u32 orig_tail_ptr, new_tail_ptr; struct axienet_local *lp = netdev_priv(ndev); struct axidma_bd *cur_p;
if (axienet_check_tx_bd_space(lp, num_frag + 1)) { /* Should not happen as last start_xmit call should have * checked for sufficient space and queue should only be * woken when sufficient space is available.
*/
netif_stop_queue(ndev); if (net_ratelimit())
netdev_warn(ndev, "TX ring unexpectedly full\n"); return NETDEV_TX_BUSY;
}
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
/* Stop queue if next transmit may not have space */ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
netif_stop_queue(ndev);
/* Matches barrier in axienet_tx_poll */
smp_mb();
/* Space might have just been freed - check again */ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
netif_wake_queue(ndev);
}
return NETDEV_TX_OK;
}
/** * axienet_dma_rx_cb - DMA engine callback for RX channel. * @data: Pointer to the skbuf_dma_descriptor structure. * @result: error reporting through dmaengine_result. * This function is called by dmaengine driver for RX channel to notify * that the packet is received.
*/ staticvoid axienet_dma_rx_cb(void *data, conststruct dmaengine_result *result)
{ struct skbuf_dma_descriptor *skbuf_dma;
size_t meta_len, meta_max_len, rx_len; struct axienet_local *lp = data; struct sk_buff *skb;
u32 *app_metadata; int i;
if (IS_ERR(app_metadata)) { if (net_ratelimit())
netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
dev_kfree_skb_any(skb);
lp->ndev->stats.rx_dropped++; goto rx_submit;
}
rx_submit: for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
RX_BUF_NUM_DEFAULT); i++)
axienet_rx_submit_desc(lp->ndev);
dma_async_issue_pending(lp->rx_chan);
}
/** * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. * @napi: Pointer to NAPI structure. * @budget: Max number of RX packets to process. * * Return: Number of RX packets processed.
*/ staticint axienet_rx_poll(struct napi_struct *napi, int budget)
{
u32 length;
u32 csumstatus;
u32 size = 0; int packets = 0;
dma_addr_t tail_p = 0; struct axidma_bd *cur_p; struct sk_buff *skb, *new_skb; struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
/* Ensure we see complete descriptor update */
dma_rmb();
skb = cur_p->skb;
cur_p->skb = NULL;
/* skb could be NULL if a previous pass already received the * packet for this slot in the ring, but failed to refill it * with a newly allocated buffer. In this case, don't try to * receive it again.
*/ if (likely(skb)) {
length = cur_p->app4 & 0x0000FFFF;
/* Only update tail_p to mark this slot as usable after it has * been successfully refilled.
*/
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
if (tail_p)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) { if (READ_ONCE(lp->rx_dim_enabled)) { struct dim_sample sample = {
.time = ktime_get(), /* Safe because we are the only writer */
.pkt_ctr = u64_stats_read(&lp->rx_packets),
.byte_ctr = u64_stats_read(&lp->rx_bytes),
.event_ctr = READ_ONCE(lp->rx_irqs),
};
net_dim(&lp->rx_dim, &sample);
}
/* Re-enable RX completion interrupts. This should * cause an immediate interrupt if any RX packets are * already pending.
*/
spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
spin_unlock_irq(&lp->rx_cr_lock);
} return packets;
}
/** * axienet_tx_irq - Tx Done Isr. * @irq: irq number * @_ndev: net_device pointer * * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. * * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the * TX BD processing.
*/ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{ unsignedint status; struct net_device *ndev = _ndev; struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
if (!(status & XAXIDMA_IRQ_ALL_MASK)) return IRQ_NONE;
/** * axienet_init_dmaengine - init the dmaengine code. * @ndev: Pointer to net_device structure * * Return: 0, on success. * non-zero error value on failure * * This is the dmaengine initialization code.
 * Acquires the "tx_chan0"/"rx_chan0" dmaengine channels, allocates the
 * TX/RX skb bookkeeping rings, pre-posts all RX descriptors, and kicks
 * the RX channel. On failure, everything acquired so far is unwound in
 * reverse order via the goto chain at the bottom.
*/ staticint axienet_init_dmaengine(struct net_device *ndev)
{ struct axienet_local *lp = netdev_priv(ndev); struct skbuf_dma_descriptor *skbuf_dma; int i, ret;
/* Request the TX channel first; on error we can simply return. */
lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0"); if (IS_ERR(lp->tx_chan)) {
dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n"); return PTR_ERR(lp->tx_chan);
}
lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0"); if (IS_ERR(lp->rx_chan)) {
ret = PTR_ERR(lp->rx_chan);
dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n"); goto err_dma_release_tx;
}
/* Both software rings start out empty. */
lp->tx_ring_tail = 0;
lp->tx_ring_head = 0;
lp->rx_ring_tail = 0;
lp->rx_ring_head = 0;
/* Per-descriptor bookkeeping entries for the TX ring. */
lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
GFP_KERNEL); if (!lp->tx_skb_ring) {
ret = -ENOMEM; goto err_dma_release_rx;
} for (i = 0; i < TX_BD_NUM_MAX; i++) {
skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); if (!skbuf_dma) {
ret = -ENOMEM; goto err_free_tx_skb_ring;
}
lp->tx_skb_ring[i] = skbuf_dma;
}
/* Same bookkeeping for the RX ring. */
lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
GFP_KERNEL); if (!lp->rx_skb_ring) {
ret = -ENOMEM; goto err_free_tx_skb_ring;
} for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); if (!skbuf_dma) {
ret = -ENOMEM; goto err_free_rx_skb_ring;
}
lp->rx_skb_ring[i] = skbuf_dma;
} /* TODO: Instead of BD_NUM_DEFAULT use runtime support */ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
axienet_rx_submit_desc(ndev);
/* Make the pre-posted RX descriptors live on the channel. */
dma_async_issue_pending(lp->rx_chan);
return 0;
/* Error unwinding, in reverse order of acquisition. The kfree() loops
 * are safe for partially-filled rings because kcalloc() zeroed the
 * arrays and kfree(NULL) is a no-op.
 */
err_free_rx_skb_ring: for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
kfree(lp->rx_skb_ring[i]);
kfree(lp->rx_skb_ring);
err_free_tx_skb_ring: for (i = 0; i < TX_BD_NUM_MAX; i++)
kfree(lp->tx_skb_ring[i]);
kfree(lp->tx_skb_ring);
err_dma_release_rx:
dma_release_channel(lp->rx_chan);
err_dma_release_tx:
dma_release_channel(lp->tx_chan); return ret;
}
/** * axienet_init_legacy_dma - init the dma legacy code. * @ndev: Pointer to net_device structure * * Return: 0, on success. * non-zero error value on failure * * This is the dma initialization code. It also allocates interrupt * service routines, enables the interrupt lines and ISR handling. *
*/ staticint axienet_init_legacy_dma(struct net_device *ndev)
{ int ret; struct axienet_local *lp = netdev_priv(ndev);
/** * axienet_open - Driver open routine. * @ndev: Pointer to net_device structure * * Return: 0, on success. * non-zero error value on failure * * This is the driver open routine. It calls phylink_start to start the * PHY device. * It also allocates interrupt service routines, enables the interrupt lines * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer * descriptors are initialized.
*/ staticint axienet_open(struct net_device *ndev)
{ int ret; struct axienet_local *lp = netdev_priv(ndev);
/* When we do an Axi Ethernet reset, it resets the complete core * including the MDIO. MDIO must be disabled before resetting. * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
axienet_lock_mii(lp);
ret = axienet_device_reset(ndev);
axienet_unlock_mii(lp);
ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); if (ret) {
dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); return ret;
}
phylink_start(lp->phylink);
/* Start the statistics refresh work */
schedule_delayed_work(&lp->stats_work, 0);
if (lp->use_dmaengine) { /* Enable interrupts for Axi Ethernet core (if defined) */ if (lp->eth_irq > 0) {
ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
ndev->name, ndev); if (ret) goto err_phy;
}
ret = axienet_init_dmaengine(ndev); if (ret < 0) goto err_free_eth_irq;
} else {
ret = axienet_init_legacy_dma(ndev); if (ret) goto err_phy;
}
/** * axienet_stop - Driver stop routine. * @ndev: Pointer to net_device structure * * Return: 0, on success. * * This is the driver stop routine. It calls phylink_disconnect to stop the PHY * device. It also removes the interrupt handlers and disables the interrupts. * The Axi DMA Tx/Rx BDs are released.
*/ staticint axienet_stop(struct net_device *ndev)
{ struct axienet_local *lp = netdev_priv(ndev); int i;
if (!lp->use_dmaengine) {
WRITE_ONCE(lp->stopping, true);
flush_work(&lp->dma_err_task);
for (i = 0; i < TX_BD_NUM_MAX; i++)
kfree(lp->tx_skb_ring[i]);
kfree(lp->tx_skb_ring); for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
kfree(lp->rx_skb_ring[i]);
kfree(lp->rx_skb_ring);
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev); return 0;
}
/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success, -EBUSY if the interface is running (the MTU can
 * only be changed while the device is down), or -EINVAL if the requested
 * MTU does not fit in the receive buffer memory the core was configured
 * with. (The previous kernel-doc incorrectly claimed this always
 * returns 0.)
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	/* The full frame (MTU + VLAN Ethernet header + trailer/FCS) must
	 * fit in the core's RX buffer memory.
	 */
	if ((new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER /** * axienet_poll_controller - Axi Ethernet poll mechanism. * @ndev: Pointer to net_device structure * * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior * to polling the ISRs and are enabled back after the polling is done.
*/ staticvoid axienet_poll_controller(struct net_device *ndev)
{ struct axienet_local *lp = netdev_priv(ndev);
/**
 * axienet_ethtools_get_drvinfo - Report driver name and version.
 * @ndev: Pointer to net_device structure
 * @ed:   Pointer to the ethtool_drvinfo structure to fill in
 *
 * Implements "ethtool -i ethX": copies the driver name and version
 * strings into the supplied ethtool_drvinfo.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
}
/**
 * axienet_ethtools_get_regs_len - Size of the register dump in bytes.
 * @ndev: Pointer to net_device structure
 *
 * Implements the ethtool get_regs_len operation.
 *
 * Return: total length of the AxiEthernet register dump, i.e.
 * AXIENET_REGS_N 32-bit words.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return AXIENET_REGS_N * sizeof(u32);
}
/** * axienet_ethtools_get_regs - Dump the contents of all registers present * in AxiEthernet core. * @ndev: Pointer to net_device structure * @regs: Pointer to ethtool_regs structure * @ret: Void pointer used to return the contents of the registers. * * This implements ethtool command for getting the Axi Ethernet register dump. * Issue "ethtool -d ethX" to execute this function.
*/ staticvoid axienet_ethtools_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *ret)
{
u32 *data = (u32 *)ret;
size_t len = sizeof(u32) * AXIENET_REGS_N; struct axienet_local *lp = netdev_priv(ndev);
/** * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) * settings. * @ndev: Pointer to net_device structure * @epauseparm:Pointer to ethtool_pauseparam structure * * This implements ethtool command for enabling flow control on Rx and Tx * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this * function. * * Return: 0 on success, -EFAULT if device is running
*/ staticint
axienet_ethtools_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *epauseparm)
{ struct axienet_local *lp = netdev_priv(ndev);
/**
 * axienet_update_coalesce_rx() - Update selected bits of the RX CR
 * @lp:	  Device private data
 * @cr:	  New value for the bits selected by @mask
 * @mask: Bits of the cached RX CR to replace with @cr
 *
 * Updates the software copy of the RX control register under the RX CR
 * lock and, when the RX DMA channel is already running, writes the result
 * to the hardware. A stopped channel picks the cached value up on the
 * next dma_start() instead.
 */
static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->rx_cr_lock);

	lp->rx_dma_cr = (lp->rx_dma_cr & ~mask) | cr;

	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->rx_dma_started) {
		u32 hw_cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		u32 wr_cr = lp->rx_dma_cr;

		/* Don't enable IRQs if they are disabled by NAPI */
		if (!(hw_cr & XAXIDMA_IRQ_ALL_MASK))
			wr_cr &= ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, wr_cr);
	}

	spin_unlock_irq(&lp->rx_cr_lock);
}
/**
 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
 * @lp: Device private data
 *
 * Return: the packet-count threshold for the current DIM profile index,
 * growing by a factor of four per profile step and capped at 255.
 */
static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
{
	int count = 1 << (lp->rx_dim.profile_ix << 1);

	return min(count, 255);
}
/**
 * axienet_update_coalesce_tx() - Update selected bits of the TX CR
 * @lp:	  Device private data
 * @cr:	  New value for the bits selected by @mask
 * @mask: Bits of the cached TX CR to replace with @cr
 *
 * Updates the software copy of the TX control register under the TX CR
 * lock and, when the TX DMA channel is already running, writes the result
 * to the hardware. A stopped channel picks the cached value up on the
 * next dma_start() instead.
 */
static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->tx_cr_lock);

	lp->tx_dma_cr = (lp->tx_dma_cr & ~mask) | cr;

	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->tx_dma_started) {
		u32 hw_cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		u32 wr_cr = lp->tx_dma_cr;

		/* Don't enable IRQs if they are disabled by NAPI */
		if (!(hw_cr & XAXIDMA_IRQ_ALL_MASK))
			wr_cr &= ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, wr_cr);
	}

	spin_unlock_irq(&lp->tx_cr_lock);
}
/** * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure * @kernel_coal: ethtool CQE mode setting structure * @extack: extack for reporting error messages * * This implements ethtool command for getting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to * execute this function. * * Return: 0 always
*/ staticint
axienet_ethtools_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ecoalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack)
{ struct axienet_local *lp = netdev_priv(ndev);
u32 cr;
/* NOTE(review): the following disclaimer is not part of the driver source
 * and appears to have been pasted in from a web page; translated from
 * German here for reference — it should almost certainly be removed:
 *
 * "The information on this web page has been carefully compiled to the
 * best of our knowledge. However, neither the completeness, nor the
 * correctness, nor the quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */