/* Enabling promiscuous mode for one interface will be * common for both the interface as the interface shares * the same hardware resource.
*/ for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
flag = true;
if (!enable && flag) {
enable = true;
dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
}
/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */ for (i = 0; i <= cpsw->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 1);
cpsw_ale_control_set(ale, i,
ALE_PORT_NO_SA_UPDATE, 1);
}
/* Clear All Untouched entries */
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); do {
cpu_relax(); if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) break;
} while (time_after(timeout, jiffies));
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
dev_dbg(&ndev->dev, "promiscuity enabled\n");
} else { /* Don't Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */ for (i = 0; i <= cpsw->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 0);
cpsw_ale_control_set(ale, i,
ALE_PORT_NO_SA_UPDATE, 0);
}
dev_dbg(&ndev->dev, "promiscuity disabled\n");
}
}
}
/** * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes * if it's not deleted * @ndev: device to sync * @addr: address to be added or deleted * @vid: vlan id, if vid < 0 set/unset address for real device * @add: add address if the flag is set or remove otherwise
*/ staticint cpsw_set_mc(struct net_device *ndev, const u8 *addr, int vid, int add)
{ struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; int mask, flags, ret;
if (vid < 0) { if (cpsw->data.dual_emac)
vid = cpsw->slaves[priv->emac_port].port_vlan; else
vid = 0;
}
if (add)
ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0); else
ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
return ret;
}
staticint cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{ struct addr_sync_ctx *sync_ctx = ctx; struct netdev_hw_addr *ha; int found = 0, ret = 0;
if (!vdev || !(vdev->flags & IFF_UP)) return 0;
/* vlan address is relevant if its sync_cnt != 0 */
netdev_for_each_mc_addr(ha, vdev) { if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
found = ha->sync_cnt; break;
}
}
if (found)
sync_ctx->consumed++;
if (sync_ctx->flush) { if (!found)
cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); return 0;
}
if (found)
ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
return ret;
}
staticint cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{ struct addr_sync_ctx sync_ctx; int ret;
staticint cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{ struct addr_sync_ctx *sync_ctx = ctx; struct netdev_hw_addr *ha; int found = 0;
if (!vdev || !(vdev->flags & IFF_UP)) return 0;
/* vlan address is relevant if its sync_cnt != 0 */
netdev_for_each_mc_addr(ha, vdev) { if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
found = ha->sync_cnt; break;
}
}
/* Restore allmulti on vlans if necessary */
cpsw_ale_set_allmulti(cpsw->ale,
ndev->flags & IFF_ALLMULTI, slave_port);
/* add/remove mcast address either for real netdev or for vlan */
__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
cpsw_del_mc_addr);
}
staticunsignedint cpsw_rxbuf_total_len(unsignedint len)
{
len += CPSW_HEADROOM_NA;
len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
return SKB_DATA_ALIGN(len);
}
staticvoid cpsw_rx_handler(void *token, int len, int status)
{ struct page *new_page, *page = token; void *pa = page_address(page); struct cpsw_meta_xdp *xmeta = pa + CPSW_XMETA_OFFSET; struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev); int pkt_size = cpsw->rx_packet_max; int ret = 0, port, ch = xmeta->ch; int headroom = CPSW_HEADROOM_NA; struct net_device *ndev = xmeta->ndev;
u32 metasize = 0; struct cpsw_priv *priv; struct page_pool *pool; struct sk_buff *skb; struct xdp_buff xdp;
dma_addr_t dma;
if (cpsw->data.dual_emac && status >= 0) {
port = CPDMA_RX_SOURCE_PORT(status); if (port)
ndev = cpsw->slaves[--port].ndev;
}
priv = netdev_priv(ndev);
pool = cpsw->page_pool[ch]; if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { /* In dual emac mode check for all interfaces */ if (cpsw->data.dual_emac && cpsw->usage_count &&
(status >= 0)) { /* The packet received is for the interface which * is already down and the other interface is up * and running, instead of freeing which results * in reducing of the number of rx descriptor in * DMA engine, requeue page back to cpdma.
*/
new_page = page; goto requeue;
}
/* the interface is going down, pages are purged */
page_pool_recycle_direct(pool, page); return;
}
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
}
/* pass skb to netstack if no XDP prog or returned XDP_PASS */
skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size)); if (!skb) {
ndev->stats.rx_dropped++;
page_pool_recycle_direct(pool, page); goto requeue;
}
skb_reserve(skb, headroom);
skb_put(skb, len); if (metasize)
skb_metadata_set(skb, metasize);
skb->dev = ndev; if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb); if (priv->rx_ts_enabled)
cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */
skb_mark_for_recycle(skb);
netif_receive_skb(skb);
if (phy->link) {
mac_control = CPSW_SL_CTL_GMII_EN;
if (phy->speed == 1000)
mac_control |= CPSW_SL_CTL_GIG; if (phy->duplex)
mac_control |= CPSW_SL_CTL_FULLDUPLEX;
/* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100)
mac_control |= CPSW_SL_CTL_IFCTL_A; /* in band mode only works in 10Mbps RGMII mode */ elseif ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
if (priv->rx_pause)
mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
if (priv->tx_pause)
mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
if (mac_control != slave->mac_control)
cpsw_sl_ctl_set(slave->mac_sl, mac_control);
switch (cpsw->version) { case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); /* Increase RX FIFO size to 5 for supporting fullduplex * flow control mode
*/
slave_write(slave,
(CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); break; case CPSW_VERSION_2: case CPSW_VERSION_3: case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); /* Increase RX FIFO size to 5 for supporting fullduplex * flow control mode
*/
slave_write(slave,
(CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); break;
}
/* setup max packet size, and mac address */
cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
cpsw->rx_packet_max);
cpsw_set_slave_mac(slave, priv);
ret = pm_runtime_resume_and_get(cpsw->dev); if (ret < 0) return ret;
netif_carrier_off(ndev);
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); if (ret) {
dev_err(priv->dev, "cannot set real number of tx queues\n"); goto err_cleanup;
}
ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); if (ret) {
dev_err(priv->dev, "cannot set real number of rx queues\n"); goto err_cleanup;
}
reg = cpsw->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
CPSW_RTL_VERSION(reg));
/* Initialize host and slave ports */ if (!cpsw->usage_count)
cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
if (cpsw->tx_irq_disabled) {
cpsw->tx_irq_disabled = false;
enable_irq(cpsw->irqs_table[1]);
}
if (cpsw->rx_irq_disabled) {
cpsw->rx_irq_disabled = false;
enable_irq(cpsw->irqs_table[0]);
}
/* create rxqs for both infs in dual mac as they use same pool * and must be destroyed together when no users.
*/
ret = cpsw_create_xdp_rxqs(cpsw); if (ret < 0) goto err_cleanup;
ret = cpsw_fill_rx_channels(priv); if (ret < 0) goto err_cleanup;
if (cpsw->cpts) { if (cpts_register(cpsw->cpts))
dev_err(priv->dev, "error registering cpts device\n"); else
writel(0x10, &cpsw->wr_regs->misc_en);
}
}
cpsw_restore(priv);
/* Enable Interrupt pacing if configured */ if (cpsw->coal_intvl != 0) { struct ethtool_coalesce coal;
/* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames.
*/ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
netif_tx_stop_queue(txq);
/* Barrier, so that stop_queue visible to other cpus */
smp_mb__after_atomic();
if (cpdma_check_free_tx_desc(txch))
netif_tx_wake_queue(txq);
}
ret = pm_runtime_resume_and_get(cpsw->dev); if (ret < 0) return ret;
if (cpsw->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual * EMAC port separation
*/ int i;
for (i = 0; i < cpsw->data.slaves; i++) { if (vid == cpsw->slaves[i].port_vlan) {
ret = -EINVAL; goto err;
}
}
}
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
pm_runtime_put(cpsw->dev); return ret;
}
/* We need a custom implementation of phy_do_ioctl_running() because in switch * mode, dev->phydev may be different than the phy of the active_slave. We need * to operate on the locally saved phy instead.
*/ staticint cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{ struct cpsw_priv *priv = netdev_priv(dev); struct cpsw_common *cpsw = priv->cpsw; int slave_no = cpsw_slave_index(cpsw, priv); struct phy_device *phy;
if (!netif_running(dev)) return -EINVAL;
phy = cpsw->slaves[slave_no].phy; if (phy) return phy_mii_ioctl(phy, req, cmd);
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n"); return -EINVAL;
}
data->channels = prop;
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n"); return -EINVAL;
}
data->bd_ram_size = prop;
if (of_property_read_u32(node, "mac_control", &prop)) {
dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); return -EINVAL;
}
data->mac_control = prop;
if (of_property_read_bool(node, "dual_emac"))
data->dual_emac = true;
/* * Populate all the child nodes here...
*/
ret = of_platform_populate(node, NULL, NULL, &pdev->dev); /* We do not want to force this, as in some cases may not have child */ if (ret)
dev_warn(&pdev->dev, "Doesn't have any child node\n");
/* This is no slave child node, continue */ if (!of_node_name_eq(slave_node, "slave")) continue;
slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
NULL); if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
IS_ERR(slave_data->ifphy)) {
ret = PTR_ERR(slave_data->ifphy);
dev_err(&pdev->dev, "%d: Error retrieving port phy: %d\n", i, ret); goto err_node_put;
}
slave_data->slave_node = slave_node;
slave_data->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp); if (slave_data->phy_node) {
dev_dbg(&pdev->dev, "slave[%d] using phy-handle=\"%pOF\"\n",
i, slave_data->phy_node);
} elseif (of_phy_is_fixed_link(slave_node)) { /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node.
*/
ret = of_phy_register_fixed_link(slave_node); if (ret) {
dev_err_probe(&pdev->dev, ret, "failed to register fixed-link phy\n"); goto err_node_put;
}
slave_data->phy_node = of_node_get(slave_node);
} elseif (parp) {
u32 phyid; struct device_node *mdio_node; struct platform_device *mdio;
if (lenp != (sizeof(__be32) * 2)) {
dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i); goto no_phy_slave;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
of_node_put(mdio_node); if (!mdio) {
dev_err(&pdev->dev, "Missing mdio platform device\n");
ret = -EINVAL; goto err_node_put;
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
put_device(&mdio->dev);
} else {
dev_err(&pdev->dev, "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
i); goto no_phy_slave;
}
ret = of_get_phy_mode(slave_node, &slave_data->phy_if); if (ret) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i); goto err_node_put;
}
no_phy_slave:
ret = of_get_mac_address(slave_node, slave_data->mac_addr); if (ret) {
ret = ti_cm_get_macid(&pdev->dev, i,
slave_data->mac_addr); if (ret) goto err_node_put;
} if (data->dual_emac) { if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
&prop)) {
dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
slave_data->dual_emac_res_vlan, i);
} else {
slave_data->dual_emac_res_vlan = prop;
}
}
i++; if (i == data->slaves) {
ret = 0; goto err_node_put;
}
}
mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); if (IS_ERR(mode)) {
ret = PTR_ERR(mode);
dev_err(dev, "gpio request failed, ret %d\n", ret); return ret;
}
clk = devm_clk_get(dev, "fck"); if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev, "fck is not found %d\n", ret); return ret;
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
/* get misc irq*/
irq = platform_get_irq(pdev, 3); if (irq <= 0) return irq;
cpsw->misc_irq = irq;
/* * This may be required here for child devices.
*/
pm_runtime_enable(dev);
/* Need to enable clocks with runtime PM api to access module * registers
*/
ret = pm_runtime_resume_and_get(dev); if (ret < 0) goto clean_runtime_disable_ret;
ret = cpsw_probe_dt(&cpsw->data, pdev); if (ret) goto clean_dt_ret;
soc = soc_device_match(cpsw_soc_devices); if (soc)
cpsw->quirk_irq = true;
data = &cpsw->data;
cpsw->slaves = devm_kcalloc(dev,
data->slaves, sizeof(struct cpsw_slave),
GFP_KERNEL); if (!cpsw->slaves) {
ret = -ENOMEM; goto clean_dt_ret;
}
/* register the network device */
SET_NETDEV_DEV(ndev, dev);
ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
ret = register_netdev(ndev); if (ret) {
dev_err(dev, "error registering net device\n");
ret = -ENODEV; goto clean_cpts;
}
if (cpsw->data.dual_emac) {
ret = cpsw_probe_dual_emac(priv); if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); goto clean_unregister_netdev_ret;
}
}
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and * MISC IRQs which are always kept disabled with this driver so * we will not request them. * * If anyone wants to implement support for those, make sure to * first request and append them to irqs_table array.
*/
ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
0, dev_name(dev), cpsw); if (ret < 0) {
dev_err(dev, "error attaching irq (%d)\n", ret); goto clean_unregister_netdev_ret;
}
staticvoid cpsw_remove(struct platform_device *pdev)
{ struct cpsw_common *cpsw = platform_get_drvdata(pdev); int i, ret;
ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { /* Note, if this error path is taken, we're leaking some * resources.
*/
dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
ERR_PTR(ret)); return;
}
for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].ndev)
unregister_netdev(cpsw->slaves[i].ndev);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.