/* * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. * * Copyright (C) 2012 Marvell * * Rami Rosen <rosenr@marvell.com> * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied.
*/
/* NOTE(review): this region was mangled by extraction — several #define
 * directives are fused onto single physical lines (only the first directive
 * on a line is effective to the preprocessor). Re-split one directive per
 * line before building.
 */
/* Exception Interrupt Port/Queue Cause register * * Their behavior depend of the mapping done using the PCPX2Q * registers. For a given CPU if the bit associated to a queue is not * set, then for the register a read from this CPU will always return * 0 and a write won't do anything
 */
/* bits 0..7 = TXQ SENT, one bit per queue. * bits 8..15 = RXQ OCCUP, one bit per queue. * bits 16..23 = RXQ FREE, one bit per queue. * bit 29 = OLD_REG_SUM, see old reg ? * bit 30 = TX_ERR_SUM, one bit for 4 ports * bit 31 = MISC_SUM, one bit for 4 ports
 */ #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) #define MVNETA_MISCINTR_INTR_MASK BIT(31)
/* The values of the bucket refill base period and refill period are taken from * the reference manual, and adds up to a base resolution of 10Kbps. This allows * to cover all rate-limit values from 10Kbps up to 5Gbps
 */
/* Base period for the rate limit algorithm */ #define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100
/* Number of Base Period to wait between each bucket refill */ #define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000
/* The base resolution for rate limiting, in bps. Any max_rate value should be * a multiple of that value.
 */ #define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
(MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
MVNETA_TXQ_BUCKET_REFILL_PERIOD))
/* The two bytes Marvell header. Either contains a special value used * by Marvell switches when a specific hardware mode is enabled (not * supported by this driver) or is filled automatically by zeroes on * the RX side. Those two bytes being at the front of the Ethernet * header, they allow to have the IP header aligned on a 4 bytes * boundary automatically: the hardware skips those two bytes on its * own.
 */ #define MVNETA_MH_SIZE 2
/* Number of bytes to be taken into account by HW when putting incoming data * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */ #define MVNETA_RX_PKT_OFFSET_CORRECTION 64
/* NOTE(review): this is the tail of a struct definition (presumably
 * struct mvneta_port) whose opening and earlier members were lost in
 * extraction; "conststruct" below is a fused "const struct". Restore
 * from the original source before building.
 */
/* Flags for special SoC configurations */ bool neta_armada3700; bool neta_ac5;
u16 rx_offset_correction; conststruct mbus_dram_target_info *dram_target_info;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the * layout of the transmit and reception DMA descriptors, and their * layout is therefore defined by the hardware design
 */
/* NOTE(review): the members below are the tail of an RX queue struct
 * (presumably struct mvneta_rx_queue) whose opening and earlier members
 * were lost in extraction.
 */
/* Virtual address of the RX buffer */ void **buf_virt_addr;
/* Virtual address of the RX DMA descriptors array */ struct mvneta_rx_desc *descs;
/* DMA address of the RX DMA descriptors array */
dma_addr_t descs_phys;
/* Index of the last RX DMA descriptor */ int last_desc;
/* Index of the next RX DMA descriptor to process */ int next_desc_to_proc;
/* Index of first RX DMA descriptor to refill */ int first_to_refill;
/* Number of descriptors pending refill (consumed by refill path) */
u32 refill_num;
};
staticenum cpuhp_state online_hpstate; /* The hardware supports eight (8) rx queues, but we are only allowing * the first one to be used. Therefore, let's just allocate one queue.
*/ staticint rxq_number = 8; staticint txq_number = 8;
staticint rxq_def;
staticint rx_copybreak __read_mostly = 256;
/* HW BM need that each port be identify by a unique ID */ staticint global_port_id;
/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error.
 *
 * Returns non-zero when both the FIRST and LAST bits are set in @status.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
	       MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */ staticvoid mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int ndescs)
{ /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can * be added at once
 */ while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
(MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
}
/* NOTE(review): the function tail (write of the remaining ndescs and the
 * closing brace) was lost in extraction; "staticvoid" above is a fused
 * "static void". Restore from the original source before building.
 */
/* Get number of RX descriptors occupied by received packets.
 *
 * Reads the RXQ status register and masks off everything but the
 * occupied-descriptor count field.
 */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or * from mvneta_rxq_drop_pkts().
*/ staticvoid mvneta_rxq_desc_num_update(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int rx_done, int rx_filled)
{
u32 val;
/* Only 255 descriptors can be added at once */ while ((rx_done > 0) || (rx_filled > 0)) { if (rx_done <= 0xff) {
val = rx_done;
rx_done = 0;
} else {
val = 0xff;
rx_done -= 0xff;
} if (rx_filled <= 0xff) {
val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
rx_filled = 0;
} else {
val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
rx_filled -= 0xff;
}
mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
}
}
/* Get pointer to next RX descriptor to be processed by SW */ staticstruct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{ int rx_desc = rxq->next_desc_to_proc;
/* NOTE(review): the rest of this function (index advance and return of
 * the descriptor pointer) was lost in extraction; "staticstruct" above
 * is a fused "static struct". Restore from the original source.
 */
/* Change maximum receive size of the port.
 *
 * The hardware field counts 2-byte units and excludes the two-byte
 * Marvell header, hence the (max_rx_size - MVNETA_MH_SIZE) / 2 below.
 */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
/* Set rx queue offset.
 *
 * @offset: packet offset in bytes; the register field counts 8-byte
 *          units, hence the offset >> 3 below.
 */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is programmed in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Tx descriptors helper methods */
/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	/* Include any descriptors deferred from earlier calls */
	int remaining = pend_desc + txq->pending;

	/* The pending-descriptor field accepts at most 255 per write */
	do {
		u32 chunk = min(remaining, 255);

		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), chunk);
		remaining -= chunk;
	} while (remaining > 0);

	txq->pending = 0;
}
/* Get pointer to next TX descriptor to be processed (send) by HW */ staticstruct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{ int tx_desc = txq->next_desc_to_proc;
/* NOTE(review): extraction fused this function with the tail of an
 * unrelated helper — the three statements below operate on an RXQ config
 * register (enabling HW buffer allocation) and do not belong to
 * mvneta_txq_next_desc_get. Restore both functions from the original.
 */
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
val |= MVNETA_RXQ_HW_BUF_ALLOC;
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Notify HW about port's assignment of pool for bigger packets */ staticvoid mvneta_rxq_long_pool_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{
u32 val;
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
/* NOTE(review): the final write-back of val to the config register and
 * the closing brace were lost in extraction; "staticvoid" above is a
 * fused "static void". Restore from the original source.
 */
/* Notify HW about port's assignment of pool for smaller packets */ staticvoid mvneta_rxq_short_pool_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{
u32 val;
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
/* NOTE(review): the final write-back of val to the config register and
 * the closing brace were lost in extraction; "staticvoid" above is a
 * fused "static void". Restore from the original source.
 */
/* NOTE(review): the lines below are the interior of one or more BM mbus
 * window helpers whose headers were lost in extraction (the window-search
 * logic, the io-win query, and the NETA->BM window open appear to come
 * from separate functions). Restore from the original source.
 */
if (pp->bm_win_id < 0) { /* Find first not occupied window */ for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { if (win_enable & (1 << i)) {
pp->bm_win_id = i; break;
}
} if (i == MVNETA_MAX_DECODE_WIN) return -ENOMEM;
} else {
i = pp->bm_win_id;
}
/* Get BM window information */
err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
&target, &attr); if (err < 0) return err;
pp->bm_win_id = -1;
/* Open NETA -> BM window */
err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
target, attr); if (err < 0) {
netdev_info(pp->dev, "fail to configure mbus window to BM\n"); return err;
} return 0;
}
/* Assign and initialize pools for port. In case of fail * buffer manager will remain disabled for current port.
 */ staticint mvneta_bm_port_init(struct platform_device *pdev, struct mvneta_port *pp)
{ struct device_node *dn = pdev->dev.of_node;
u32 long_pool_id, short_pool_id;
/* Armada 3700 has no mbus; skip the NETA->BM window setup there */
if (!pp->neta_armada3700) { int ret;
ret = mvneta_bm_port_mbus_init(pp); if (ret) return ret;
}
if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
netdev_info(pp->dev, "missing long pool id\n"); return -EINVAL;
}
/* Create port's long pool depending on mtu */
pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
MVNETA_BM_LONG, pp->id,
MVNETA_RX_PKT_SIZE(pp->dev->mtu)); if (!pp->pool_long) {
netdev_info(pp->dev, "fail to obtain long pool for port\n"); return -ENOMEM;
}
/* If short pool id is not defined, assume using single pool */ if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
short_pool_id = long_pool_id;
/* Create port's short pool */
pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
MVNETA_BM_SHORT, pp->id,
MVNETA_BM_SHORT_PKT_SIZE); if (!pp->pool_short) {
netdev_info(pp->dev, "fail to obtain short pool for port\n");
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); return -ENOMEM;
}
/* NOTE(review): the success-path tail (presumably "return 0;" and the
 * closing brace) was lost in extraction; "staticint" above is a fused
 * "static int". Restore from the original source.
 */
/* Update settings of a pool for bigger packets */ staticvoid mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{ struct mvneta_bm_pool *bm_pool = pp->pool_long; struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; int num;
/* Release all buffers from long pool */
mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); if (hwbm_pool->buf_num) {
WARN(1, "cannot free all buffers in pool %d\n",
bm_pool->id); goto bm_mtu_err;
}
/* NOTE(review): the middle of this function (pool resize/refill and the
 * bm_mtu_err: label) was lost in extraction — the lines below are the
 * error-path fallback to software buffer management. Restore from the
 * original source; "staticvoid" above is a fused "static void".
 */
pp->bm_priv = NULL;
pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
/* Start the Ethernet port RX and TX activity */ staticvoid mvneta_port_up(struct mvneta_port *pp)
{ int queue;
u32 q_map;
/* NOTE(review): the body of this function (building q_map and enabling
 * the TX/RX queue command registers) was lost in extraction; "staticvoid"
 * above is a fused "static void". Restore from the original source.
 */
/* Stop the Ethernet port activity: disable all active RX queues, wait for
 * RX to drain, then do the same for TX, and finally poll until the TX FIFO
 * reports empty. Each wait is bounded by a millisecond-granularity timeout
 * and logs a warning on expiry rather than blocking forever.
 * NOTE(review): "staticvoid" below is a fused "static void" (extraction
 * artifact) — fix before building.
 */
/* Stop the Ethernet port activity */ staticvoid mvneta_port_down(struct mvneta_port *pp)
{
u32 val; int count;
/* Stop Rx port activity. Check port Rx activity. */
val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
/* Issue stop command for active channels only */ if (val != 0)
mvreg_write(pp, MVNETA_RXQ_CMD,
val << MVNETA_RXQ_DISABLE_SHIFT);
/* Wait for all Rx activity to terminate. */
count = 0; do { if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
netdev_warn(pp->dev, "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
val); break;
}
mdelay(1);
/* Re-read: loop until no RX queue reports enabled */
val = mvreg_read(pp, MVNETA_RXQ_CMD);
} while (val & MVNETA_RXQ_ENABLE_MASK);
/* Stop Tx port activity. Check port Tx activity. Issue stop * command for active channels only
 */
val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
if (val != 0)
mvreg_write(pp, MVNETA_TXQ_CMD,
(val << MVNETA_TXQ_DISABLE_SHIFT));
/* Wait for all Tx activity to terminate. */
count = 0; do { if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
netdev_warn(pp->dev, "TIMEOUT for TX stopped status=0x%08x\n",
val); break;
}
mdelay(1);
/* Check TX Command reg that all Txqs are stopped */
val = mvreg_read(pp, MVNETA_TXQ_CMD);
} while (val & MVNETA_TXQ_ENABLE_MASK);
/* Double check to verify that TX FIFO is empty */
count = 0; do { if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
netdev_warn(pp->dev, "TX FIFO empty timeout status=0x%08x\n",
val); break;
}
mdelay(1);
val = mvreg_read(pp, MVNETA_PORT_STATUS);
} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
(val & MVNETA_TX_IN_PRGRS));
/* Final settle delay after the FIFO drains */
udelay(200);
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 ctrl;

	/* Read-modify-write: set only the port-enable bit */
	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	ctrl |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, ctrl);
}
/* Disable the port and wait for about 200 usec before retuning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 ctrl;

	/* Reset the Enable bit in the Serial Control Register */
	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	ctrl &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, ctrl);

	/* Give in-flight traffic time to drain */
	udelay(200);
}
/* Multicast tables methods */
/* Set all entries in Unicast MAC Table; queue==-1 means reject all */ staticvoid mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{ int offset;
u32 val;
if (queue == -1) {
val = 0;
} else {
/* Entry = valid bit | queue number, replicated into all 4 bytes */
val = 0x1 | (queue << 1);
val |= (val << 24) | (val << 16) | (val << 8);
}
/* NOTE(review): the loop writing val across the unicast table registers
 * and the closing brace were lost in extraction; "staticvoid" above is a
 * fused "static void". Restore from the original source.
 */
/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ staticvoid mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{ int offset;
u32 val;
if (queue == -1) {
val = 0;
} else {
/* Entry = valid bit | queue number, replicated into all 4 bytes */
val = 0x1 | (queue << 1);
val |= (val << 24) | (val << 16) | (val << 8);
}
/* NOTE(review): the loop writing val across the special-multicast table
 * registers and the closing brace were lost in extraction; "staticvoid"
 * above is a fused "static void". Restore from the original source.
 */
/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */ staticvoid mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{ int offset;
u32 val;
if (queue == -1) {
memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
val = 0;
} else {
memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
/* Entry = valid bit | queue number, replicated into all 4 bytes */
val = 0x1 | (queue << 1);
val |= (val << 24) | (val << 16) | (val << 8);
}
/* NOTE(review): the loop writing val across the other-multicast table
 * registers and the closing brace were lost in extraction; "staticvoid"
 * above is a fused "static void". Restore from the original source.
 */
/* NOTE(review): function header lost in extraction — this is the body of
 * a per-CPU interrupt-unmask helper. Restore the signature from the
 * original source.
 */
/* All the queue are unmasked, but actually only the ones * mapped to this CPU will be unmasked
 */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
MVNETA_RX_INTR_MASK_ALL |
MVNETA_TX_INTR_MASK_ALL |
MVNETA_MISCINTR_INTR_MASK);
}
/* NOTE(review): function header lost in extraction — this is the body of
 * a per-CPU interrupt-mask helper. Restore the signature from the
 * original source.
 */
/* All the queue are masked, but actually only the ones * mapped to this CPU will be masked
 */
mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}
/* NOTE(review): function header lost in extraction — this is the body of
 * the per-CPU cause-clearing helper referenced by mvneta_defaults_set()
 * via on_each_cpu(). Restore the signature from the original source.
 */
/* All the queue are cleared, but actually only the ones * mapped to this CPU will be cleared
 */
mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}
/* This method sets defaults to the NETA port: * Clears interrupt Cause and Mask registers. * Clears all MAC tables. * Sets defaults to all registers. * Resets RX and TX descriptor rings. * Resets PHY. * This method can be called after mvneta_port_down() to return the port * settings to defaults.
 */ staticvoid mvneta_defaults_set(struct mvneta_port *pp)
{ int cpu; int queue;
u32 val; int max_cpu = num_present_cpus();
/* Clear all Cause registers */
on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
/* Set CPU queue access map. CPUs are assigned to the RX and * TX queues modulo their number. If there is only one TX * queue then it is assigned to the CPU associated to the * default RX queue.
 */
/* NOTE(review): the for_each_present_cpu() loop below is never closed in
 * this extraction — the register write of rxq_map/txq_map and the loop's
 * closing braces were lost. Restore from the original source.
 */
for_each_present_cpu(cpu) { int rxq_map = 0, txq_map = 0; int rxq, txq; if (!pp->neta_armada3700) { for (rxq = 0; rxq < rxq_number; rxq++) if ((rxq % max_cpu) == cpu)
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
for (txq = 0; txq < txq_number; txq++) if ((txq % max_cpu) == cpu)
txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
/* With only one TX queue we configure a special case * which will allow to get all the irq on a single * CPU
 */ if (txq_number == 1)
txq_map = (cpu == pp->rxq_def) ?
MVNETA_CPU_TXQ_ACCESS(0) : 0;
/* Set Port Acceleration Mode */ if (pp->bm_priv) /* HW buffer management + legacy parser */
val = MVNETA_ACC_MODE_EXT2; else /* SW buffer management + legacy parser */
val = MVNETA_ACC_MODE_EXT1;
mvreg_write(pp, MVNETA_ACC_MODE, val);
if (pp->bm_priv)
mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
/* Update val of portCfg register accordingly with all RxQueue types */
val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
mvreg_write(pp, MVNETA_PORT_CONFIG, val);
val = 0;
mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
/* Build PORT_SDMA_CONFIG_REG */
val = 0;
/* Default burst size */
val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
/* NOTE(review): "#ifdefined" below is a fused "#if defined" — this will
 * not preprocess correctly; fix before building.
 */
#ifdefined(__BIG_ENDIAN)
val |= MVNETA_DESC_SWAP; #endif
/* Assign port SDMA configuration */
mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
/* Disable PHY polling in hardware, since we're using the * kernel phylib to do this.
 */
val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
val &= ~MVNETA_PHY_POLLING_ENABLE;
mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
/* NOTE(review): the two lines below reference addr[] and appear to belong
 * to a different function (a MAC-address setter), spliced here by the
 * extraction. Restore both functions from the original source.
 */
/* Accept frames of this address */
mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 *
 * @value: packet-count threshold; OR'd with a zero non-occupied
 *         threshold into the RXQ threshold register.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}
/* Set the time delay in usec before RX interrupt will be generated by * HW.
 */ staticvoid mvneta_rx_time_coal_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, u32 value)
{
u32 val; unsignedlong clk_rate;
clk_rate = clk_get_rate(pp->clk);
/* Convert usec to core clock ticks: (ticks/us) * usec */
val = (clk_rate / 1000000) * value;
/* NOTE(review): the final write of val to the time-threshold register
 * and the closing brace were lost in extraction; "staticvoid" and
 * "unsignedlong" above are fused keywords. Restore from the original.
 */
/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	/* The DEC_SENT field is 8 bits wide, so acknowledge in chunks
	 * of at most 255 descriptors, then write the remainder.
	 */
	while (sent_desc > 0xff) {
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id),
			    0xff << MVNETA_TXQ_DEC_SENT_SHIFT);
		sent_desc -= 0xff;
	}

	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id),
		    sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT);
}
/* Get number of TX descriptors already sent by HW */ staticint mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
u32 val; int sent_desc;
/* NOTE(review): the body of this function (status-register read, field
 * extraction, and return) was lost in extraction; "staticint" above is a
 * fused "static int". Restore from the original source.
 */
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int done = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Acknowledge the sent descriptors so HW can reuse the slots */
	if (done)
		mvneta_txq_sent_desc_dec(pp, txq, done);

	return done;
}
/* Set TXQ descriptors fields relevant for CSUM calculation */ static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto, int ip_hdr_len, int l4_proto)
{
u32 command;
/* NOTE(review): extraction spliced the body of a different function here
 * — the switch below logs RX descriptor error codes and belongs to an
 * RX-error helper, not to mvneta_txq_desc_csum. Restore both functions
 * from the original source.
 */
switch (status & MVNETA_RXD_ERR_CODE_MASK) { case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
status, rx_desc->data_size); break; case MVNETA_RXD_ERR_OVERRUN:
netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
status, rx_desc->data_size); break; case MVNETA_RXD_ERR_LEN:
netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
status, rx_desc->data_size); break; case MVNETA_RXD_ERR_RESOURCE:
netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
status, rx_desc->data_size); break;
}
}
/* Handle RX checksum offload based on the descriptor's status.
 *
 * Returns CHECKSUM_UNNECESSARY when the device validated the checksum
 * (RXCSUM enabled, IPv4 frame, L4 checksum OK flag set), otherwise
 * CHECKSUM_NONE so the stack checksums in software.
 */
static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
{
	if ((pp->dev->features & NETIF_F_RXCSUM) &&
	    (status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK))
		return CHECKSUM_UNNECESSARY;

	return CHECKSUM_NONE;
}
/* Return tx queue pointer (find last set bit) according to <cause> returned * form tx_done reg. <cause> must not be null. The return value is always a * valid queue for matching the first one found in <cause>.
 */ staticstruct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
u32 cause)
{ int queue = fls(cause) - 1;
/* NOTE(review): the return statement (presumably returning the txq for
 * this queue index) and closing brace were lost in extraction;
 * "staticstruct" above is a fused "static struct". Restore from the
 * original source.
 */
/* Drop packets received by the RXQ and free buffers */ staticvoid mvneta_rxq_drop_pkts(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{ int rx_done, i;
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); if (rx_done)
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
/* HW buffer management path: hand each dropped buffer back to its pool */
if (pp->bm_priv) { for (i = 0; i < rx_done; i++) { struct mvneta_rx_desc *rx_desc =
mvneta_rxq_next_desc_get(rxq);
u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); struct mvneta_bm_pool *bm_pool;
bm_pool = &pp->bm_priv->bm_pools[pool_id]; /* Return dropped buffer to the pool */
mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
rx_desc->buf_phys_addr);
} return;
}
/* Software buffer management path (loop body truncated below) */
for (i = 0; i < rxq->size; i++) { struct mvneta_rx_desc *rx_desc = rxq->descs + i; void *data = rxq->buf_virt_addr[i]; if (!data || !(rx_desc->buf_phys_addr)) continue;
/* NOTE(review): the SW-BM loop body (dma unmap / page free) and the
 * function's closing braces were lost in extraction; "staticvoid" above
 * is a fused "static void". Restore from the original source.
 */
/* NOTE(review): "staticinline" below is a fused "static inline"; the body
 * of this refill function is truncated and, from the xdpf/sinfo lines
 * onward, spliced with an unrelated XDP transmit helper. Restore both
 * functions from the original source before building.
 */
staticinline int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{ struct mvneta_rx_desc *rx_desc; int curr_desc = rxq->first_to_refill; int i;
/* Refill at most 64 descriptors per invocation */
for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
rx_desc = rxq->descs + curr_desc; if (!(rx_desc->buf_phys_addr)) { if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { struct mvneta_pcpu_stats *stats;
pr_err("Can't refill queue %d. Done %d from %d\n",
rxq->id, i, rxq->refill_num);
/* --- lines below belong to an XDP frame-submit helper --- */
if (unlikely(xdp_frame_has_frags(xdpf)))
num_frames += sinfo->nr_frags;
if (txq->count + num_frames >= txq->size) return MVNETA_XDP_DROPPED;
for (i = 0; i < num_frames; i++) { struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
skb_frag_t *frag = NULL; int len = xdpf->len;
dma_addr_t dma_addr;
if (unlikely(i)) { /* paged area */
frag = &sinfo->frags[i - 1];
len = skb_frag_size(frag);
}
[Unrelated website boilerplate appended by the extraction tool — not part of the driver source:]
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note: the colored syntax highlighting and the measurement are still experimental.