/* struct dtsec_cfg - dTSEC configuration
 *
 * (NOTE(review): the paragraph below documents half-duplex back-pressure,
 * but the field it belongs to is not present in this chunk — confirm against
 * the full driver.)
 * Transmit half-duplex flow control, under software control for 10/100-Mbps
 * half-duplex media. If set, back pressure is applied to media by raising
 * carrier.
 * halfdup_retransmit:
 *	Number of retransmission attempts following a collision.
 *	If this is exceeded dTSEC aborts transmission due to excessive
 *	collisions. The standard specifies the attempt limit to be 15.
 * halfdup_coll_window:
 *	The number of bytes of the frame during which collisions may occur.
 *	The default value of 55 corresponds to the frame byte at the end of
 *	the standard 512-bit slot time window. If collisions are detected
 *	after this byte, the late collision event is asserted and transmission
 *	of current frame is aborted.
 * tx_pad_crc:
 *	Pad and append CRC. If set, the MAC pads all transmitted short frames
 *	and appends a CRC to every frame regardless of padding requirement.
 * tx_pause_time:
 *	Transmit pause time value. This pause value is used as part of the
 *	pause frame to be sent when a transmit pause frame is initiated.
 *	If set to 0 this disables transmission of pause frames.
 * preamble_len:
 *	Length, in bytes, of the preamble field preceding each Ethernet
 *	start-of-frame delimiter byte. The default value of 0x7 should be used
 *	in order to guarantee reliable operation with IEEE 802.3 compliant
 *	hardware.
 * rx_prepend:
 *	Packet alignment padding length. The specified number of bytes (1-31)
 *	of zero padding are inserted before the start of each received frame.
 *	For Ethernet, where optional preamble extraction is enabled, the
 *	padding appears before the preamble, otherwise the padding precedes
 *	the layer 2 header.
 *
 * This structure contains basic dTSEC configuration and must be passed to
 * init() function. A default set of configuration values can be
 * obtained by calling set_dflts().
 */
struct dtsec_cfg {
	u16 halfdup_retransmit;
	u16 halfdup_coll_window;
	bool tx_pad_crc;
	u16 tx_pause_time;
	/* NOTE(review): presumably enable the 1588 timestamp unit and its
	 * error exception — not documented in this chunk; confirm.
	 */
	bool ptp_tsu_en;
	bool ptp_exception_en;
	u32 preamble_len;
	u32 rx_prepend;
	/* remaining fields are undocumented in the original header comment */
	u16 tx_pause_time_extd;
	u16 maximum_frame;
	u32 non_back_to_back_ipg1;
	u32 non_back_to_back_ipg2;
	u32 min_ifg_enforcement;
	u32 back_to_back_ipg;
};
/* Per-MAC driver state for one dTSEC instance. */
struct fman_mac {
	/* pointer to dTSEC memory mapped registers */
	struct dtsec_regs __iomem *regs;
	/* MAC address of device */
	u64 addr;
	/* Ethernet physical interface */
	phy_interface_t phy_if;
	u16 max_speed;
	/* device cookie used by the exception cbs */
	struct mac_device *dev_id;
	fman_mac_exception_cb *exception_cb;
	fman_mac_exception_cb *event_cb;
	/* Number of individual addresses in registers for this station */
	u8 num_of_ind_addr_in_regs;
	/* pointer to driver's global address hash table */
	struct eth_hash_t *multicast_addr_hash;
	/* pointer to driver's individual address hash table */
	struct eth_hash_t *unicast_addr_hash;
	u8 mac_id;
	/* NOTE(review): exception bitmask — exact semantics not visible in
	 * this chunk; confirm against the enable/disable exception paths.
	 */
	u32 exceptions;
	bool ptp_tsu_enabled;
	bool en_tsu_err_exception;
	/* configuration parameters; checked by check_init_parameters() */
	struct dtsec_cfg *dtsec_drv_param;
	void *fm;
	struct fman_rev_info fm_rev_info;
	bool basex_if;
	struct mdio_device *tbidev;
	struct phylink_pcs pcs;
};
	/* Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 * NOTE(review): fragment — the enclosing function is not visible in
	 * this chunk.  '®s' in the extraction is mojibake for '&regs'.
	 */
	iowrite32be(tbi_addr, &regs->tbipa);
staticint check_init_parameters(struct fman_mac *dtsec)
{ if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
MAX_PACKET_ALIGNMENT); return -EINVAL;
} if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
MAX_INTER_PACKET_GAP) ||
((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
MAX_INTER_PACKET_GAP) ||
((dtsec->dtsec_drv_param)->back_to_back_ipg >
MAX_INTER_PACKET_GAP)) {
pr_err("Inter packet gap can't be greater than %d\n",
MAX_INTER_PACKET_GAP); return -EINVAL;
} if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
MAX_RETRANSMISSION) {
pr_err("maxRetransmission can't be greater than %d\n",
MAX_RETRANSMISSION); return -EINVAL;
} if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
MAX_COLLISION_WINDOW) {
pr_err("collisionWindow can't be greater than %d\n",
MAX_COLLISION_WINDOW); return -EINVAL; /* If Auto negotiation process is disabled, need to set up the PHY * using the MII Management Interface
*/
} if (!dtsec->exception_cb) {
pr_err("uninitialized exception_cb\n"); return -EINVAL;
} if (!dtsec->event_cb) {
pr_err("uninitialized event_cb\n"); return -EINVAL;
}
	/* NOTE(review): fragment of the dTSEC interrupt handler; its function
	 * header is not visible in this chunk.  '®s' in the extraction is
	 * mojibake for '&regs'.
	 *
	 * Latch pending events, mask out MDIO events, keep only enabled ones,
	 * acknowledge them, then dispatch each to the exception callback.
	 */
	/* do not handle MDIO events */
	event = ioread32be(&regs->ievent) &
		(u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));

	event &= ioread32be(&regs->imask);

	/* writing a bit back to IEVENT clears (acknowledges) it */
	iowrite32be(event, &regs->ievent);

	if (event & DTSEC_IMASK_BREN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
	if (event & DTSEC_IMASK_RXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
	if (event & DTSEC_IMASK_GTSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
	if (event & DTSEC_IMASK_BTEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
	if (event & DTSEC_IMASK_TXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
	if (event & DTSEC_IMASK_TXEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
	if (event & DTSEC_IMASK_LCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
	if (event & DTSEC_IMASK_CRLEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
	if (event & DTSEC_IMASK_XFUNEN) {
		/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
		/* FIXME: This races with the rest of the driver! */
		if (dtsec->fm_rev_info.major == 2) {
			u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
			/* a. Write 0x00E0_0C00 to DTSEC_ID
			 *	This is a read only register
			 * b. Read and save the value of TPKT
			 */
			tpkt1 = ioread32be(&regs->tpkt);

			/* c. Read the register at dTSEC address offset 0x32C */
			tmp_reg1 = ioread32be(&regs->reserved02c0[27]);

			/* d. Compare bits [9:15] to bits [25:31] of the
			 * register at address offset 0x32C.
			 */
			if ((tmp_reg1 & 0x007F0000) !=
			    (tmp_reg1 & 0x0000007F)) {
				/* If they are not equal, save the value of
				 * this register and wait for at least
				 * MAXFRM*16 ns
				 *
				 * NOTE(review): min(x, 1) clamps the delay to
				 * at most 1 us, which contradicts the
				 * "at least MAXFRM*16 ns" comment — looks
				 * like it may have been meant to be max();
				 * confirm against the errata text.
				 */
				usleep_range((u32)(min
					     (dtsec_get_max_frame_length(dtsec) *
					     16 / 1000, 1)), (u32)
					     (min(dtsec_get_max_frame_length
					     (dtsec) * 16 / 1000, 1) + 1));
			}

			/* e. Read and save TPKT again and read the register
			 * at dTSEC address offset 0x32C again
			 */
			tpkt2 = ioread32be(&regs->tpkt);
			tmp_reg2 = ioread32be(&regs->reserved02c0[27]);

			/* f. Compare the value of TPKT saved in step b to
			 * value read in step e. Also compare bits [9:15] of
			 * the register at offset 0x32C saved in step d to the
			 * value of bits [9:15] saved in step e. If the two
			 * registers values are unchanged, then the transmit
			 * portion of the dTSEC controller is locked up and
			 * the user should proceed to the recover sequence.
			 */
			if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
			    (tmp_reg2 & 0x007F0000))) {
				/* recover sequence */

				/* b.Wait until IEVENT[GRSC]=1, or at least
				 * 100 us has elapsed.
				 */
				for (i = 0; i < 100; i++) {
					if (ioread32be(&regs->ievent) &
					    DTSEC_IMASK_GRSCEN)
						break;
					udelay(1);
				}
				if (ioread32be(&regs->ievent) &
				    DTSEC_IMASK_GRSCEN)
					iowrite32be(DTSEC_IMASK_GRSCEN,
						    &regs->ievent);
				else
					pr_debug("Rx lockup due to Tx lockup\n");

				/* c.Write a 1 to bit n of FM_RSTC
				 * (offset 0x0CC of FPM)
				 */
				fman_reset_mac(dtsec->fm, dtsec->mac_id);

				/* d.Wait 4 Tx clocks (32 ns) */
				udelay(1);

				/* e.Write a 0 to bit n of FM_RSTC. */
				/* cleared by FMAN */
			}
		}
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
	}
	if (event & DTSEC_IMASK_MAGEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
	if (event & DTSEC_IMASK_GRSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
	if (event & DTSEC_IMASK_TDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
	if (event & DTSEC_IMASK_RDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
	/* NOTE(review): fragment of the graceful-stop path; its function
	 * header is not visible in this chunk, and the final else-branch is
	 * left open exactly as in the original fragment.
	 */
	/* Graceful stop - Assert the graceful Rx stop bit */
	tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
	iowrite32be(tmp, &regs->rctrl);

	if (dtsec->fm_rev_info.major == 2) {
		/* Workaround for dTSEC Errata A002 */
		usleep_range(100, 200);
	} else {
		/* Workaround for dTSEC Errata A004839 */
		usleep_range(10, 50);
	}

	/* Graceful stop - Assert the graceful Tx stop bit */
	if (dtsec->fm_rev_info.major == 2) {
		/* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
		pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
	} else {
		tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
		iowrite32be(tmp, &regs->tctrl);
	/* NOTE(review): fragment of the MAC-configuration path; maps the
	 * phylink interface mode to the dTSEC ECNTRL mode bits in 'tmp'.
	 * The enclosing function is not visible in this chunk.
	 */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_RMII:
		tmp = DTSEC_ECNTRL_RMM;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		/* all three serial modes use the TBI/SGMII bits */
		tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM;
		break;
	default:
		dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n",
			 phy_modes(state->interface));
		return;
	}
	/* Initialize MAC Station Address registers (1 & 2)
	 * Station address have to be swapped (big endian to little endian)
	 * NOTE(review): fragment — the enclosing function is not visible in
	 * this chunk.
	 */
	dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
	set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
	/* NOTE(review): fragment of the hash-add path; the enclosing function
	 * is not visible in this chunk.
	 */
	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	/* considering the 9 highest order bits in crc H[8:0]:
	 * if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
	 * and H[5:1] (next 5 bits) identify the hash bit
	 * if ghtx = 1 H[8:5] (highest order 4 bits) identify the hash register
	 * and H[4:0] (next 5 bits) identify the hash bit.
	 *
	 * In bucket index output the low 5 bits identify the hash register
	 * bit, while the higher 4 bits identify the hash register
	 */
	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set in gaddr instead of
		 * igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	set_bucket(dtsec->regs, bucket, true);

	/* Create element to be added to the driver hash table */
	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
	if (!hash_entry)
		return -ENOMEM;
	hash_entry->addr = addr;
	INIT_LIST_HEAD(&hash_entry->node);

	if (addr & MAC_GROUP_ADDRESS)
		/* Group Address */
		list_add_tail(&hash_entry->node,
			      &dtsec->multicast_addr_hash->lsts[bucket]);
	else
		list_add_tail(&hash_entry->node,
			      &dtsec->unicast_addr_hash->lsts[bucket]);
	/* NOTE(review): fragment of the hash-delete path (mirrors the bucket
	 * derivation of the add path); the enclosing function is not visible
	 * in this chunk.
	 */
	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set
		 * in gaddr instead of igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}
	/* Allocate the per-MAC state structure.
	 * NOTE(review): the original comment said "UCC GETH data structure" —
	 * stale copy-paste from the ucc_geth driver; this allocates the
	 * fman_mac/dtsec structure.
	 */
	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
	if (!dtsec)
		return NULL;

	/* allocate memory for the d_tsec driver parameters data structure. */
	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
	if (!dtsec_drv_param)
		goto err_dtsec;
/* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are * supported? If not, we can determine support via the phy if SerDes * support is added.
*/ if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII ||
mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) {
__set_bit(PHY_INTERFACE_MODE_SGMII, supported);
__set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
} elseif (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) {
__set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
}
if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) {
phy_interface_set_rgmii(supported);
/* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports * RMII and RGMII. However, the only SoCs which support RMII * are the P1017 and P1023. Avoid advertising this mode on * other SoCs. This is a bit of a moot point, since there's no * in-tree support for ethernet on these platforms...
*/ if (of_machine_is_compatible("fsl,P1023") ||
of_machine_is_compatible("fsl,P1023RDB"))
__set_bit(PHY_INTERFACE_MODE_RMII, supported);
}
/* NOTE(review): the following text is extraction residue from a German web
 * page and is not part of the driver.  Preserved here in English translation
 * and wrapped in a comment so it no longer sits as bare top-level text:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Remark: the colored syntax display and the measurement are still
 * experimental."
 */