/* Per-pool-type packet size and buffer count, indexed by BM pool
 * logical number (short/long/jumbo). Not const: pkt_size may be
 * updated at runtime (e.g. on MTU change).
 */
static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);
/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	/* Each thread has its own register window at swth_base[thread] */
	writel(data, priv->swth_base[thread] + offset);
}
/* Get number of maximum RXQ */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	/* Single-queue distribution mode on PPv2.2+ uses exactly one RXQ */
	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	/* Egress port numbering starts after the TX containers */
	return MVPP2_MAX_TCONT + port->id;
}
/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	/* Physical TXQs are laid out per egress port, MVPP2_MAX_TXQ each */
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
/* Returns a struct page if page_pool is set, otherwise a buffer */ staticvoid *mvpp2_frag_alloc(conststruct mvpp2_bm_pool *pool, struct page_pool *page_pool)
{ if (page_pool) return page_pool_dev_alloc_pages(page_pool);
if (likely(pool->frag_size <= PAGE_SIZE)) return netdev_alloc_frag(pool->frag_size);
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
val |= MVPP2_BM_START_MASK;
val &= ~MVPP2_BM_LOW_THRESH_MASK;
val &= ~MVPP2_BM_HIGH_THRESH_MASK;
/* Set 8 Pools BPPI threshold for MVPP23 */ if (priv->hw_version == MVPP23) {
val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
} else {
val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
}
if (sizeof(dma_addr_t) == 8)
*dma_addr |= (u64)dma_addr_highbits << 32;
if (sizeof(phys_addr_t) == 8)
*phys_addr |= (u64)phys_addr_highbits << 32;
}
put_cpu();
}
/* Free all buffers from the pool */ staticvoid mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, int buf_num)
{ struct page_pool *pp = NULL; int i;
if (buf_num > bm_pool->buf_num) {
WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
bm_pool->id, buf_num);
buf_num = bm_pool->buf_num;
}
if (priv->percpu_pools)
pp = priv->page_pool[bm_pool->id];
for (i = 0; i < buf_num; i++) {
dma_addr_t buf_dma_addr;
phys_addr_t buf_phys_addr; void *data;
/* Check buffer counters after free */
buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); if (buf_num) {
WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
bm_pool->id, bm_pool->buf_num); return 0;
}
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
val |= MVPP2_BM_STOP_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
if (priv->percpu_pools) {
page_pool_destroy(priv->page_pool[bm_pool->id]);
priv->page_pool[bm_pool->id] = NULL;
}
/* Create all BM pools at maximum size with an initial buffer size of 0.
 * On failure, destroy every pool created so far and return the error.
 */
static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	struct mvpp2_bm_pool *bm_pool;
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;

	/* Per-CPU pools: one short + one long pool per RXQ */
	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}

	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	/* Tear down the pools that were successfully created */
	while (--i >= 0)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}
/* Routine enable PPv23 8 pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	/* Read-modify-write: only set the 8-pool mode bit */
	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}
/* Cleanup pool before actual initialization in the OS */
static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
	u32 val;
	int i;

	/* Drain the BM from all possible residues left by firmware:
	 * each read pops one buffer from the pool, discard the result.
	 */
	for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
		mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));

	put_cpu();

	/* Stop the BM pool */
	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
}
if (priv->percpu_pools)
poolnum = mvpp2_get_nrxqs(priv) * 2;
/* Clean up the pool state in case it contains stale state */ for (i = 0; i < poolnum; i++)
mvpp2_bm_pool_cleanup(priv, i);
if (priv->percpu_pools) { for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i]; if (port->xdp_prog) {
dma_dir = DMA_BIDIRECTIONAL; break;
}
}
for (i = 0; i < poolnum; i++) { /* the pool in use */ int pn = i / (poolnum / 2);
priv->page_pool[i] =
mvpp2_create_page_pool(dev,
mvpp2_pools[pn].buf_num,
mvpp2_pools[pn].pkt_size,
dma_dir); if (IS_ERR(priv->page_pool[i])) { int j;
/* Remove Flow control enable bit to prevent race between FW and Kernel * If Flow control was enabled, it would be re-enabled.
*/
val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
val &= ~FLOW_CONTROL_ENABLE_BIT;
mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
/* Set same Flow control for all RXQs */ for (q = 0; q < port->nrxqs; q++) { /* Set stop and start Flow control RXQ thresholds */
val = MSS_THRESHOLD_START;
val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq)); /* Set RXQ port ID */
val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ MSS_RXQ_ASS_HOSTID_OFFS));
/* Calculate RXQ host ID: * In Single queue mode: Host ID equal to Host ID used for * shared RX interrupt * In Multi queue mode: Host ID equal to number of * RXQ ID / number of CoS queues * In Single resource mode: Host ID always equal to 0
*/ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
host_id = port->nqvecs; elseif (queue_mode == MVPP2_QDIST_MULTI_MODE)
host_id = q; else
host_id = 0;
/* Set RXQ host ID */
val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ MSS_RXQ_ASS_HOSTID_OFFS));
/* Notify Firmware that Flow control config space ready for update */
val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
val |= cm3_state;
mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
/* Remove Flow control enable bit to prevent race between FW and Kernel * If Flow control was enabled, it would be re-enabled.
*/
val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
val &= ~FLOW_CONTROL_ENABLE_BIT;
mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
/* Disable Flow control for all RXQs */ for (q = 0; q < port->nrxqs; q++) { /* Set threshold 0 to disable Flow control */
val = 0;
val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ MSS_RXQ_ASS_HOSTID_OFFS));
/* Notify Firmware that Flow control config space ready for update */
val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
val |= cm3_state;
mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
/* Remove Flow control enable bit to prevent race between FW and Kernel * If Flow control were enabled, it would be re-enabled.
*/
val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
val &= ~FLOW_CONTROL_ENABLE_BIT;
mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
/* Check if BM pool should be enabled/disable */ if (en) { /* Set BM pool start and stop thresholds per port */
val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
val |= MSS_BUF_POOL_PORT_OFFS(port->id);
val &= ~MSS_BUF_POOL_START_MASK;
val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
val &= ~MSS_BUF_POOL_STOP_MASK;
val |= MSS_THRESHOLD_STOP;
mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
} else { /* Remove BM pool from the port */
val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
/* Zero BM pool start and stop thresholds to disable pool * flow control if pool empty (not used by any port)
*/ if (!pool->buf_num) {
val &= ~MSS_BUF_POOL_START_MASK;
val &= ~MSS_BUF_POOL_STOP_MASK;
}
/* Notify Firmware that Flow control config space ready for update */
val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
val |= cm3_state;
mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
/* disable/enable flow control for BM pool on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];

		if (port->priv->percpu_pools) {
			/* Per-CPU variant: one BM pool per RXQ */
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			/* Shared variant: per-port long and short pools */
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}
/* Enable global flow control and verify the CM3 firmware acknowledges the
 * update command. Returns 0 on success, -EOPNOTSUPP (and clears
 * priv->global_tx_fc) if the firmware never clears the command bit.
 */
static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout;

	/* Enable global flow control. In this stage global
	 * flow control enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if Firmware running and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Firmware clears the command bit once it has consumed the update */
	for (timeout = 0; timeout < MSS_FC_MAX_TIMEOUT; timeout++) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
	}

	priv->global_tx_fc = false;
	return -EOPNOTSUPP;
}
/* Release buffer to BM */ staticinlinevoid mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
phys_addr_t buf_phys_addr)
{ unsignedint thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); unsignedlong flags = 0;
if (test_bit(thread, &port->priv->lock_map))
spin_lock_irqsave(&port->bm_lock[thread], flags);
if (port->priv->hw_version >= MVPP22) {
u32 val = 0;
if (sizeof(dma_addr_t) == 8)
val |= upper_32_bits(buf_dma_addr) &
MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
if (sizeof(phys_addr_t) == 8)
val |= (upper_32_bits(buf_phys_addr)
<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply * returned in the "cookie" field of the RX * descriptor. Instead of storing the virtual address, we * store the physical address
*/
mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
if (test_bit(thread, &port->priv->lock_map))
spin_unlock_irqrestore(&port->bm_lock[thread], flags);
put_cpu();
}
/* Allocate buffers for the pool */ staticint mvpp2_bm_bufs_add(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, int buf_num)
{ int i, buf_size, total_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr; struct page_pool *pp = NULL; void *buf;
if (port->priv->percpu_pools &&
bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
netdev_err(port->dev, "attempted to use jumbo frames with per-cpu pools"); return 0;
}
if (buf_num < 0 ||
(buf_num + bm_pool->buf_num > bm_pool->size)) {
netdev_err(port->dev, "cannot allocate %d buffers for pool %d\n",
buf_num, bm_pool->id); return 0;
}
if (port->priv->percpu_pools)
pp = port->priv->page_pool[bm_pool->id]; for (i = 0; i < buf_num; i++) {
buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
&phys_addr, GFP_KERNEL); if (!buf) break;
netdev_dbg(port->dev, "pool %d: %d of %d buffers added\n",
bm_pool->id, i, buf_num); return i;
}
/* Notify the driver that BM pool is being used as specific type and return the * pool pointer on success
*/ staticstruct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{ struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num;
if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
(!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
netdev_err(port->dev, "Invalid pool %d\n", pool); return NULL;
}
/* Allocate buffers in case BM pool is used as long pool, but packet * size doesn't match MTU or BM pool hasn't being used yet
*/ if (new_pool->pkt_size == 0) { int pkts_num;
/* Set default buffer number or free all the buffers in case * the pool is not empty
*/
pkts_num = new_pool->buf_num; if (pkts_num == 0) { if (port->priv->percpu_pools) { if (pool < port->nrxqs)
pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num; else
pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
} else {
pkts_num = mvpp2_pools[pool].buf_num;
}
} else {
mvpp2_bm_bufs_free(port->dev->dev.parent,
port->priv, new_pool, pkts_num);
}
staticstruct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type, unsignedint pool, int pkt_size)
{ struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num;
if (pool > port->nrxqs * 2) {
netdev_err(port->dev, "Invalid pool %d\n", pool); return NULL;
}
/* Allocate buffers in case BM pool is used as long pool, but packet * size doesn't match MTU or BM pool hasn't being used yet
*/ if (new_pool->pkt_size == 0) { int pkts_num;
/* Set default buffer number or free all the buffers in case * the pool is not empty
*/
pkts_num = new_pool->buf_num; if (pkts_num == 0)
pkts_num = mvpp2_pools[type].buf_num; else
mvpp2_bm_bufs_free(port->dev->dev.parent,
port->priv, new_pool, pkts_num);
/* Initialize pools for swf, shared buffers variant */ staticint mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{ enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; int rxq;
/* If port pkt_size is higher than 1518B: * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
*/ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
long_log_pool = MVPP2_BM_JUMBO;
short_log_pool = MVPP2_BM_LONG;
} else {
long_log_pool = MVPP2_BM_LONG;
short_log_pool = MVPP2_BM_SHORT;
}
if (!port->pool_long) {
port->pool_long =
mvpp2_bm_pool_use(port, long_log_pool,
mvpp2_pools[long_log_pool].pkt_size); if (!port->pool_long) return -ENOMEM;
/* Initialize pools for swf, percpu buffers variant */ staticint mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{ struct mvpp2_bm_pool *bm_pool; int i;
for (i = 0; i < port->nrxqs; i++) {
bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
mvpp2_pools[MVPP2_BM_SHORT].pkt_size); if (!bm_pool) return -ENOMEM;
bm_pool->port_map |= BIT(port->id);
mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
}
for (i = 0; i < port->nrxqs; i++) {
bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
mvpp2_pools[MVPP2_BM_LONG].pkt_size); if (!bm_pool) return -ENOMEM;
bm_pool->port_map |= BIT(port->id);
mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
}
/* Update L4 checksum when jumbo enable/disable on port. * Only port 0 supports hardware checksum offload due to * the Tx FIFO size limitation. * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor * has 7 bits, so the maximum L3 offset is 128.
*/ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
port->dev->features &= ~csums;
port->dev->hw_features &= ~csums;
} else {
port->dev->features |= csums;
port->dev->hw_features |= csums;
}
}
staticint mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{ struct mvpp2_port *port = netdev_priv(dev); enum mvpp2_bm_pool_log_num new_long_pool; int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
if (port->priv->percpu_pools) goto out_set;
/* If port MTU is higher than 1518B: * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
*/ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
new_long_pool = MVPP2_BM_JUMBO; else
new_long_pool = MVPP2_BM_LONG;
if (new_long_pool != port->pool_long->id) { if (port->tx_fc) { if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
mvpp2_bm_pool_update_fc(port,
port->pool_short, false); else
mvpp2_bm_pool_update_fc(port, port->pool_long, false);
}
/* Remove port from old short & long pool */
port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
port->pool_long->pkt_size);
port->pool_long->port_map &= ~BIT(port->id);
port->pool_long = NULL;
/* Mask the current thread's Rx/Tx interrupts * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK.
*/ staticvoid mvpp2_interrupts_mask(void *arg)
{ struct mvpp2_port *port = arg; int cpu = smp_processor_id();
u32 thread;
/* If the thread isn't used, don't do anything */ if (cpu > port->priv->nthreads) return;
/* Unmask the current thread's Rx/Tx interrupts. * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK.
*/ staticvoid mvpp2_interrupts_unmask(void *arg)
{ struct mvpp2_port *port = arg; int cpu = smp_processor_id();
u32 val, thread;
/* If the thread isn't used, don't do anything */ if (cpu >= port->priv->nthreads) return;
thread = mvpp2_cpu_to_thread(port->priv, cpu);
val = MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); if (port->gop_id == 2) {
val |= GENCONF_CTRL0_PORT2_RGMII;
} elseif (port->gop_id == 3) {
val |= GENCONF_CTRL0_PORT3_RGMII_MII;
/* According to the specification, GENCONF_CTRL0_PORT3_RGMII * should be set to 1 for RGMII and 0 for MII. However, tests * show that it is the other way around. This is also what * U-Boot does for mvpp2, so it is assumed to be correct.
*/ if (port->phy_interface == PHY_INTERFACE_MODE_MII)
val |= GENCONF_CTRL0_PORT3_RGMII; else
val &= ~GENCONF_CTRL0_PORT3_RGMII;
}
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
val = readl(fca + MVPP22_FCA_CONTROL_REG);
val &= ~MVPP22_FCA_ENABLE_PERIODIC; if (en)
val |= MVPP22_FCA_ENABLE_PERIODIC;
writel(val, fca + MVPP22_FCA_CONTROL_REG);
}
/* Set Flow Control timer x100 faster than pause quanta to ensure that link * partner won't send traffic if port is in XOFF mode.
*/ staticvoid mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{
u32 timer;
switch (interface) { case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: if (!mvpp2_port_supports_rgmii(port)) goto invalid_conf;
mvpp22_gop_init_rgmii(port); break; case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port); break; case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: if (!mvpp2_port_supports_xlg(port)) goto invalid_conf;
mvpp22_gop_init_10gkr(port); break; default: goto unsupported_conf;
}
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
port->phy_interface == PHY_INTERFACE_MODE_SGMII) { /* Enable the GMAC link status irq for this port */
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
}
if (mvpp2_port_supports_xlg(port)) { /* Enable the XLG/GIG irqs for this port */
val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); if (mvpp2_is_xlg(port->phy_interface))
val |= MVPP22_XLG_EXT_INT_MASK_XLG; else
val |= MVPP22_XLG_EXT_INT_MASK_GIG;
writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
}
}
if (port->phylink ||
phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_MASK);
val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_MASK);
}
if (mvpp2_port_supports_xlg(port)) {
val = readl(port->base + MVPP22_XLG_INT_MASK);
val |= MVPP22_XLG_INT_MASK_LINK;
writel(val, port->base + MVPP22_XLG_INT_MASK);
/* Sets the PHY mode of the COMPHY (which configures the serdes lanes). * * The PHY mode used by the PPv2 driver comes from the network subsystem, while * the one given to the COMPHY comes from the generic PHY subsystem. Hence they * differ. * * The COMPHY configures the serdes lanes regardless of the actual use of the * lanes by the physical layer. This is why configurations like * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
*/ staticint mvpp22_comphy_init(struct mvpp2_port *port,
phy_interface_t interface)
{ int ret;
if (!port->comphy) return 0;
ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface); if (ret) return ret;
if (state->speed == 1000)
val |= MVPP2_GMAC_GMII_LB_EN_MASK; else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
if (phy_interface_mode_is_8023z(state->interface) ||
state->interface == PHY_INTERFACE_MODE_SGMII)
val |= MVPP2_GMAC_PCS_LB_EN_MASK; else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
val = readl(port->stats_base + counter->offset); if (counter->reg_is_64b)
val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
return val;
}
/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	/* Select the resource first, then read its counter register */
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, reg);
}
/* Due to the fact that software statistics and hardware statistics are, by * design, incremented at different moments in the chain of packet processing, * it is very likely that incoming packets could have been dropped after being * counted by hardware but before reaching software statistics (most probably * multicast packets), and in the opposite way, during transmission, FCS bytes * are added in between as well as TSO skb will be split and header bytes added. * Hence, statistics gathered from userspace with ifconfig (software) and * ethtool (hardware) cannot be compared.
*/ staticconststruct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
{ MVPP2_MIB_FC_SENT, "fc_sent" },
{ MVPP2_MIB_FC_RCVD, "fc_received" },
{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
{ MVPP2_MIB_COLLISION, "collision" },
{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
staticvoid mvpp2_read_stats(struct mvpp2_port *port)
{ struct mvpp2_pcpu_stats xdp_stats = {}; conststruct mvpp2_ethtool_counter *s;
u64 *pstats; int i, q;
pstats = port->ethtool_stats;
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
*pstats++ += mvpp2_read(port->priv,
mvpp2_ethtool_port_regs[i].offset +
4 * port->id);
for (q = 0; q < port->ntxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
*pstats++ += mvpp2_read_index(port->priv,
MVPP22_CTRS_TX_CTR(port->id, q),
mvpp2_ethtool_txq_regs[i].offset);
/* Rxqs are numbered from 0 from the user standpoint, but not from the * driver's. We need to add the port->first_rxq offset.
*/ for (q = 0; q < port->nrxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
*pstats++ += mvpp2_read_index(port->priv,
port->first_rxq + q,
mvpp2_ethtool_rxq_regs[i].offset);
/* No need to read again the counters right after this function if it * was called asynchronously by the user (ie. use of ethtool).
*/
cancel_delayed_work(&port->stats_work);
queue_delayed_work(port->priv->stats_queue, &port->stats_work,
MVPP2_MIB_COUNTERS_STATS_DELAY);
/* Update statistics for the given port, then take the lock to avoid * concurrent accesses on the ethtool_stats structure during its copy.
*/
mvpp2_gather_hw_statistics(&port->stats_work.work);
switch (interface) { case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
MAC_CLK_RESET_SD_TX;
val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
writel(val, mpcs + MVPP22_MPCS_CLK_RESET); break; case PHY_INTERFACE_MODE_XAUI: case PHY_INTERFACE_MODE_RXAUI:
val = readl(xpcs + MVPP22_XPCS_CFG0);
writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); break; default: break;
}
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	/* The HW field is in units of 2 bytes, excluding the 2-byte
	 * Marvell header.
	 */
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	/* The HW field is in units of 2 bytes, excluding the 2-byte
	 * Marvell header.
	 */
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
/* Set defaults to the MVPP2 port */ staticvoid mvpp2_defaults_set(struct mvpp2_port *port)
{ int tx_port_num, val, queue, lrxq;
if (port->priv->hw_version == MVPP21) { /* Update TX FIFO MIN Threshold */
val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; /* Min. TX threshold must be less than minimal packet length */
val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
}
/* Set TXQ scheduling to Round-Robin */
mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
/* Close bandwidth for all queues */
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.33Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.