// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. * Copyright (c) 2014, Synopsys, Inc. * All rights reserved
*/
/* * Convert the input usec value to the watchdog timer value. Each * watchdog timer value is equivalent to 256 clock cycles. * Calculate the required value as: * ( usec * ( system_clock_mhz / 10^6 ) / 256
*/
ret = (usec * (rate / 1000000)) / 256;
/* * Convert the input watchdog timer value to the usec value. Each * watchdog timer value is equivalent to 256 clock cycles. * Calculate the required value as: * ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
*/
ret = (riwt * 256) / (rate / 1000000);
while (key_regs--) {
ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
key_regs, *key++); if (ret) return ret;
}
return 0;
}
/* Program the hardware RSS lookup table from pdata->rss_table, one
 * indirect register write per entry; stops and returns the error from
 * the first failing write.
 *
 * NOTE(review): extraction artifact — the line referencing TFCR /
 * max_q_count below belongs to a different function (it matches
 * upstream xgbe_get_fc_queue_count()), and this function's expected
 * "return 0;" tail is missing.  max_q_count is not declared in this
 * scope.  Compare against the upstream driver before relying on this.
 */
staticint xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{ unsignedint i; int ret;
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
ret = xgbe_write_rss_reg(pdata,
XGBE_RSS_LOOKUP_TABLE_TYPE, i,
pdata->rss_table[i]); if (ret) return ret;
}
/* NOTE(review): foreign fragment from xgbe_get_fc_queue_count() */
/* From MAC ver 30H the TFCR is per priority, instead of per queue */ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) return max_q_count; else return min_t(unsignedint, pdata->tx_q_count, max_q_count);
}
/* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { unsignedint ehfc = 0;
if (pdata->rx_rfd[i]) { /* Flow control thresholds are established */ if (pfc && ets) { if (xgbe_is_pfc_queue(pdata, i))
ehfc = 1;
} else {
ehfc = 1;
}
}
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
netif_dbg(pdata, drv, pdata->netdev, "flow control %s for RXq%u\n",
ehfc ? "enabled" : "disabled", i);
}
/* Set MAC flow control */
q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
/* Enable transmit flow control */
XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); /* Set pause time */
XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { /* Clear all the interrupts which are set */
mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
/* No MTL interrupts to be enabled */
XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
}
}
/* Program the MAC speed select (SS) field of the Tx configuration
 * register (MAC_TCR) to match the requested link speed.
 *
 * The register is only written when the currently programmed value
 * differs from the desired one, avoiding a redundant mmio write.
 *
 * Returns 0 on success, -EINVAL for an unsupported speed value.
 */
static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	/* Translate the ethtool SPEED_* constant into the SS encoding */
	switch (speed) {
	case SPEED_10000:
		ss = 0x00;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_10:
		ss = 0x07;
		break;
	default:
		return -EINVAL;
	}

	/* Only touch the register if the speed actually changes */
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}
staticint xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{ /* Put the VLAN tag in the Rx descriptor */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
/* Don't check the VLAN type */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
/* Only filter on the lower 12-bits of the VLAN tag */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
/* In order for the VLAN Hash Table filtering to be effective, * the VLAN tag identifier in the VLAN Tag Register must not * be zero. Set the VLAN tag identifier to "1" to enable the * VLAN Hash Table filtering. This implies that a VLAN tag of * 1 will always pass filtering.
*/
XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
/* Generate the VLAN Hash Table value */
for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { /* Get the CRC32 value of the VLAN ID */
vid_le = cpu_to_le16(vid);
crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
vlan_hash_table |= (1 << crc);
}
/* Set the VLAN Hash Table filtering register */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
/* Hardware will still perform VLAN filtering in promiscuous mode */ if (enable) {
xgbe_disable_rx_vlan_filtering(pdata);
} else { if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
xgbe_enable_rx_vlan_filtering(pdata);
}
/* Set the MAC Hash Table registers */
hash_reg = MAC_HTR0; for (i = 0; i < hash_table_count; i++) {
XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
hash_reg += MAC_HTR_INC;
}
}
staticint xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{ if (pdata->hw_feat.hash_table_size)
xgbe_set_mac_hash_table(pdata); else
xgbe_set_mac_addn_addrs(pdata);
/* Split an MMD register address into the XPCS window index and the
 * mmio offset within that window.
 *
 * The PCS registers sit behind an indirectly addressed mmio window:
 * the caller first selects a window (index), then accesses the data at
 * an offset inside it.  The interface is based on 16-bit offsets, so
 * the MMD address is scaled by 2 before being decomposed.
 */
static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata, unsignedint mmd_address, unsignedint *index, unsignedint *offset)
{
	unsigned int addr = mmd_address << 1;	/* 16-bit register offsets */

	/* Low bits land inside the window, high bits select the window */
	*offset = pdata->xpcs_window + (addr & pdata->xpcs_window_mask);
	*index = addr & ~pdata->xpcs_window_mask;
}
/* NOTE(review): extraction artifact — this body does not match the v3
 * declaration.  It performs the v1-style PCS_V1_WINDOW_SELECT indirect
 * window access, leaves smn_address/index/offset/ret unused, and reads
 * both mmd_address and flags without either ever being assigned (the
 * address-computation phase present upstream is missing, so as written
 * this is undefined behavior).  Compare against the upstream
 * xgbe_read_mmd_regs_v1()/_v3() implementations before relying on it.
 */
staticint xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{ unsignedint mmd_address, index, offset;
u32 smn_address; int mmd_data; int ret;
/* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 32-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data.
 */
/* Serialize the two-phase window access against other XPCS users;
 * 'flags' is not declared in this scope — presumably lost in extraction */
spin_lock_irqsave(&pdata->xpcs_lock, flags);
/* Address phase: select the indirect window (mmd_address uninitialized) */
XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
/* Data phase: 32-bit read at the byte-scaled offset within the window */
mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
return mmd_data;
}
/* Write an MMD register through the v1 indirectly addressed XPCS mmio
 * window: select the window from the high address bits, then write the
 * 32-bit data at the byte-scaled offset of the low address bits.
 *
 * NOTE(review): mmd_address is used without ever being assigned — the
 * step that derives it from mmd_reg (and prtad) appears to have been
 * lost in extraction, making this undefined behavior as written.
 * TODO: restore the address computation from the upstream driver.
 */
staticvoid xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data)
{ unsignedint mmd_address; unsignedlong flags;
/* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 32-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 2 bits and writing 32 bits of data.
 */
/* Hold the XPCS lock across both phases so the window select and the
 * data write are not interleaved with another accessor */
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
/* Dispatch an MMD register read to the access routine matching this
 * device's XPCS register interface version.
 *
 * Cases are ordered V1, V3, then V2/default to mirror
 * xgbe_write_mmd_regs() (the original placed a case label after
 * "default:", which is legal but confusing); any unknown access value
 * falls back to the v2 routine, exactly as before.
 */
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);

	case XGBE_XPCS_ACCESS_V3:
		return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}
/* Route an MMD register write to the access routine for this device's
 * XPCS register interface version; the v2 routine also handles any
 * unrecognized access value.
 */
static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
		break;

	case XGBE_XPCS_ACCESS_V3:
		xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
		break;

	case XGBE_XPCS_ACCESS_V2:
	default:
		xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
		break;
	}
}
/* NOTE(review): extraction artifact — the declaration is for the
 * clause-22 MDIO SCA builder, but the body is the Tx-descriptor reset
 * (it matches upstream xgbe_tx_desc_reset()): port/reg/mdio_sca are
 * unused, rdesc is not declared in this scope, and no value is
 * returned despite the non-void return type.  Compare against the
 * upstream driver before relying on this code.
 */
staticunsignedint xgbe_create_mdio_sca_c22(int port, int reg)
{ unsignedint mdio_sca;
/* Reset the Tx descriptor * Set buffer 1 (lo) address to zero * Set buffer 1 (hi) address to zero * Reset all other control bits (IC, TTSE, B2L & B1L) * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
 */
rdesc->desc0 = 0;
rdesc->desc1 = 0;
rdesc->desc2 = 0;
rdesc->desc3 = 0;
/* Make sure ownership is written to the descriptor */
dma_wmb();
}
staticvoid xgbe_tx_desc_init(struct xgbe_channel *channel)
{ struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; int i; int start_index = ring->cur;
DBGPR("-->tx_desc_init\n");
/* Initialze all descriptors */ for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
if (!rx_usecs && !rx_frames) { /* No coalescing, interrupt for every descriptor */
inte = 1;
} else { /* Set interrupt based on Rx frame coalescing setting */ if (rx_frames && !((index + 1) % rx_frames))
inte = 1; else
inte = 0;
}
/* Reset the Rx descriptor * Set buffer 1 (lo) address to header dma address (lo) * Set buffer 1 (hi) address to header dma address (hi) * Set buffer 2 (lo) address to buffer dma address (lo) * Set buffer 2 (hi) address to buffer dma address (hi) and * set control bits OWN and INTE
*/
hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
/* Since the Rx DMA engine is likely running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the descriptor
*/
dma_wmb();
/* Make sure everything is written before the register write */
wmb();
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
lower_32_bits(rdata->rdesc_dma));
/* Start the Tx timer */ if (pdata->tx_usecs && !channel->tx_timer_active) {
channel->tx_timer_active = 1;
mod_timer(&channel->tx_timer,
jiffies + usecs_to_jiffies(pdata->tx_usecs));
}
/* Determine if an interrupt should be generated for this Tx: * Interrupt: * - Tx frame count exceeds the frame count setting * - Addition of Tx frame count to the frame count since the * last interrupt was set exceeds the frame count setting * No interrupt: * - No frame count setting specified (ethtool -C ethX tx-frames 0) * - Addition of Tx frame count to the frame count since the * last interrupt was set does not exceed the frame count setting
*/
ring->coalesce_count += tx_packets; if (!pdata->tx_frames)
tx_set_ic = 0; elseif (tx_packets > pdata->tx_frames)
tx_set_ic = 1; elseif ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
tx_set_ic = 1; else
tx_set_ic = 0;
/* Create a context descriptor if this is a TSO packet */ if (tso_context || vlan_context) { if (tso_context) {
netif_dbg(pdata, tx_queued, pdata->netdev, "TSO context descriptor, mss=%u\n",
packet->mss);
/* Set the MSS size */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
MSS, packet->mss);
/* Mark it as a CONTEXT descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
CTXT, 1);
/* Indicate this descriptor contains the MSS */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
TCMSSV, 1);
/* In case the Tx DMA engine is running, make sure everything * is written to the descriptor(s) before setting the OWN bit * for the first descriptor
*/
dma_wmb();
/* Set OWN bit for the first descriptor */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
rdesc = rdata->rdesc;
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
if (netif_msg_tx_queued(pdata))
xgbe_dump_tx_desc(pdata, ring, start_index,
packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
smp_wmb();
/* Normal Descriptor, be sure Context Descriptor bit is off */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
/* Indicate if a Context Descriptor is next */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT, 1);
/* Get the header length */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
FIRST, 1);
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL); if (rdata->rx.hdr_len)
pdata->ext_stats.rx_split_header_packets++;
} else {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
FIRST, 0);
}
/* Get the RSS hash */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
RSS_HASH, 1);
packet->rss_hash = le32_to_cpu(rdesc->desc1);
l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); switch (l34t) { case RX_DESC3_L34T_IPV4_TCP: case RX_DESC3_L34T_IPV4_UDP: case RX_DESC3_L34T_IPV6_TCP: case RX_DESC3_L34T_IPV6_UDP:
packet->rss_hash_type = PKT_HASH_TYPE_L4; break; default:
packet->rss_hash_type = PKT_HASH_TYPE_L3;
}
}
/* Not all the data has been transferred for this packet */ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) return 0;
/* This is the last of the data for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
LAST, 1);
/* Get the packet length */
rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
/* Set checksum done indicator as appropriate */ if (netdev->features & NETIF_F_RXCSUM) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CSUM_DONE, 1);
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
TNPCSUM_DONE, 1);
}
/* Set the tunneled packet indicator */ if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
TNP, 1);
pdata->ext_stats.rx_vxlan_packets++;
l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); switch (l34t) { case RX_DESC3_L34T_IPV4_UNKNOWN: case RX_DESC3_L34T_IPV6_UNKNOWN:
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
TNPCSUM_DONE, 0); break;
}
}
/* Check for errors (only valid in last descriptor) */
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
if (!err || !etlt) { /* No error if err is 0 or etlt is 0 */ if ((etlt == 0x09) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
VLAN_CTAG, 1);
packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
RX_NORMAL_DESC0,
OVT);
netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
packet->vlan_ctag);
}
} else { unsignedint tnp = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, TNP);
/* Poll Until Poll Condition */ while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
usleep_range(500, 600);
if (!count) return -EBUSY;
DBGPR("<--xgbe_exit\n");
return 0;
}
/* Perform the device software reset.
 *
 * The reset is deliberately issued twice to guard against possible
 * incorrectly generated interrupts from a single reset.
 *
 * Returns 0 on success, or the error code from the first failing
 * reset attempt.
 */
static int xgbe_exit(struct xgbe_prv_data *pdata)
{
	int ret = __xgbe_exit(pdata);

	/* Second reset only if the first one succeeded */
	return ret ? ret : __xgbe_exit(pdata);
}
/* Request a flush of every Tx queue (FTQ bit) and poll each queue until
 * the hardware clears the bit.  Devices older than MAC version 0x21 do
 * not support the flush and return success immediately.
 *
 * NOTE(review): extraction artifact — everything after the poll loop
 * (ETSALG/WRR, per-TC ETS and Rx strict-priority programming) belongs
 * to a different function (matches upstream xgbe_config_mtl_mode()),
 * and this function's expected tail
 * "if (!count) return -EBUSY; return 0;" is missing, so the poll
 * timeout (count == 0) is never reported.  Compare against upstream.
 */
staticint xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{ unsignedint i, count;
if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) return 0;
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
/* Poll Until Poll Condition */ for (i = 0; i < pdata->tx_q_count; i++) {
count = 2000; while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
MTL_Q_TQOMR, FTQ))
usleep_range(500, 600);
/* NOTE(review): foreign fragment from xgbe_config_mtl_mode() below */
/* Set Tx to weighted round robin scheduling algorithm */
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
/* Set Tx traffic classes to use WRR algorithm with equal weights */ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_ETS);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
}
/* Set Rx to strict priority algorithm */
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) { /* PFC is active for this queue */
rfa = pdata->pfc_rfa;
rfd = rfa + frame_fifo_size; if (rfd > XGMAC_FLOW_CONTROL_MAX)
rfd = XGMAC_FLOW_CONTROL_MAX; if (rfa >= XGMAC_FLOW_CONTROL_MAX)
rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
} else { /* This path deals with just maximum frame sizes which are * limited to a jumbo frame of 9,000 (plus headers, etc.) * so we can never exceed the maximum allowable RFA/RFD * values.
*/ if (q_fifo_size <= 2048) { /* rx_rfd to zero to signal no flow control */
pdata->rx_rfa[queue] = 0;
pdata->rx_rfd[queue] = 0; return;
}
if (q_fifo_size <= 4096) { /* Between 2048 and 4096 */
pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ return;
}
if (q_fifo_size <= frame_fifo_size) { /* Between 4096 and max-frame */
pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ return;
}
if (q_fifo_size <= (frame_fifo_size * 3)) { /* Between max-frame and 3 max-frames, * trigger if we get just over a frame of data and * resume when we have just under half a frame left.
*/
rfa = q_fifo_size - frame_fifo_size;
rfd = rfa + (frame_fifo_size / 2);
} else { /* Above 3 max-frames - trigger when just over * 2 frames of space available
*/
rfa = frame_fifo_size * 2;
rfa += XGMAC_FLOW_CONTROL_UNIT;
rfd = rfa + frame_fifo_size;
}
}
for (i = 0; i < pdata->rx_q_count; i++) {
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
pdata->rx_rfa[i]);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
pdata->rx_rfd[i]);
}
}
staticunsignedint xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{ /* The configured value may not be the actual amount of fifo RAM */ return min_t(unsignedint, pdata->tx_max_fifo_size,
pdata->hw_feat.tx_fifo_size);
}
staticunsignedint xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{ /* The configured value may not be the actual amount of fifo RAM */ return min_t(unsignedint, pdata->rx_max_fifo_size,
pdata->hw_feat.rx_fifo_size);
}
/* Calculate the fifo setting by dividing the queue's fifo size * by the fifo allocation increment (with 0 representing the * base allocation increment so decrement the result by 1).
*/
p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; if (p_fifo)
p_fifo--;
/* Distribute the fifo equally amongst the queues */ for (i = 0; i < queue_count; i++)
fifo[i] = p_fifo;
}
if (queue_count <= IEEE_8021QAZ_MAX_TCS) return fifo_size;
/* Rx queues 9 and up are for specialized packets, * such as PTP or DCB control packets, etc. and * don't require a large fifo
*/ for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
fifo_size -= XGMAC_FIFO_MIN_ALLOC;
}
if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) { /* No traffic classes with PFC enabled or can't do lossless */
xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); return;
}
/* Calculate how much fifo we have to play with */
rem_fifo = fifo_size - (q_fifo_size * prio_queues);
/* Calculate how much more than base fifo PFC needs, which also * becomes the threshold activation point (RFA)
*/
pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
/* Calculate DCB fifo settings: * - distribute remaining fifo between the VLAN priority * queues based on traffic class PFC enablement and overall * priority (0 is lowest priority, so start at highest)
*/
i = prio_queues; while (i > 0) {
i--;
fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
if (!pdata->pfcq[i] || !addn_fifo) continue;
if (addn_fifo > rem_fifo) {
netdev_warn(pdata->netdev, "RXq%u cannot set needed fifo size\n", i); if (!rem_fifo) continue;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.