/* struct cpsw_common is not needed, kept here for compatibility * reasons with the old driver
*/ staticint cpsw_slave_index_priv(struct cpsw_common *cpsw, struct cpsw_priv *priv)
{ if (priv->emac_port == HOST_PORT_NUM) return -1;
/* Enabling promiscuous mode for one interface will be * common for both the interface as the interface shares * the same hardware resource.
*/ for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].ndev &&
(cpsw->slaves[i].ndev->flags & IFF_PROMISC))
enable_uni = true;
if (!enable && enable_uni) {
enable = enable_uni;
dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
}
/**
 * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes
 * if it's not deleted
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 *
 * Return: result of the ALE multicast add/delete operation.
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr, int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret, slave_no;

	slave_no = cpsw_slave_index(cpsw, priv);
	/* vid < 0 means "use the port VLAN of this slave" */
	if (vid < 0)
		vid = cpsw->slaves[slave_no].port_vlan;

	mask = ALE_PORT_HOST;
	flags = vid ? ALE_VLAN : 0;
	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}
/* cpsw_update_vlan_mc - per-VLAN-device callback used to sync or flush a
 * single multicast address.
 * @vdev: VLAN net device to inspect (may be NULL or down, both ignored)
 * @vid: VLAN id the address belongs to
 * @ctx: struct addr_sync_ctx carrying the address, owning ndev, the
 *       consumed counter and the flush flag
 *
 * If the address is still in use on @vdev (sync_cnt != 0), bump
 * sync_ctx->consumed.  In flush mode, remove the ALE entry when the
 * address is no longer referenced; otherwise (re)add it.
 */
static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}
staticint cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{ struct addr_sync_ctx sync_ctx; int ret;
staticint cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{ struct addr_sync_ctx *sync_ctx = ctx; struct netdev_hw_addr *ha; int found = 0;
if (!vdev || !(vdev->flags & IFF_UP)) return 0;
/* vlan address is relevant if its sync_cnt != 0 */
netdev_for_each_mc_addr(ha, vdev) { if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
found = ha->sync_cnt; break;
}
}
if (status >= 0) {
port = CPDMA_RX_SOURCE_PORT(status); if (port)
ndev = cpsw->slaves[--port].ndev;
}
priv = netdev_priv(ndev);
pool = cpsw->page_pool[ch];
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { /* In dual emac mode check for all interfaces */ if (cpsw->usage_count && status >= 0) { /* The packet received is for the interface which * is already down and the other interface is up * and running, instead of freeing which results * in reducing of the number of rx descriptor in * DMA engine, requeue page back to cpdma.
*/
new_page = page; goto requeue;
}
/* the interface is going down, pages are purged */
page_pool_recycle_direct(pool, page); return;
}
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
}
/* pass skb to netstack if no XDP prog or returned XDP_PASS */
skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size)); if (!skb) {
ndev->stats.rx_dropped++;
page_pool_recycle_direct(pool, page); goto requeue;
}
skb->offload_fwd_mark = priv->offload_fwd_mark;
skb_reserve(skb, headroom);
skb_put(skb, len); if (metasize)
skb_metadata_set(skb, metasize);
skb->dev = ndev; if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb); if (priv->rx_ts_enabled)
cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */
skb_mark_for_recycle(skb);
netif_receive_skb(skb);
if (cpsw_is_switch_en(cpsw)) {
dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n"); return 0;
}
if (vid == cpsw->data.default_vlan) return 0;
ret = pm_runtime_resume_and_get(cpsw->dev); if (ret < 0) return ret;
/* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual * EMAC port separation
*/ for (i = 0; i < cpsw->data.slaves; i++) { if (cpsw->slaves[i].ndev &&
vid == cpsw->slaves[i].port_vlan) {
ret = -EINVAL; goto err;
}
}
dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
pm_runtime_put(cpsw->dev); return ret;
}
cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); /* learning make no sense in dual_mac mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
}
cpsw_ale_control_set(cpsw->ale, priv->emac_port,
ALE_PORT_DROP_UNKNOWN_VLAN, 0);
cpsw_ale_control_set(cpsw->ale, priv->emac_port,
ALE_PORT_NOLEARN, 0); /* disabling SA_UPDATE required to make stp work, without this setting * Host MAC addresses will jump between ports. * As per TRM MAC address can be defined as unicast supervisory (super) * by setting both (ALE_BLOCKED | ALE_SECURE) which should prevent * SA_UPDATE, but HW seems works incorrectly and setting ALE_SECURE * causes STP packets to be dropped due to ingress filter * if (source address found) and (secure) and * (receive port number != port_number)) * then discard the packet
*/
cpsw_ale_control_set(cpsw->ale, priv->emac_port,
ALE_PORT_NO_SA_UPDATE, 1);
if (phy->link) {
mac_control = CPSW_SL_CTL_GMII_EN;
if (phy->speed == 1000)
mac_control |= CPSW_SL_CTL_GIG; if (phy->duplex)
mac_control |= CPSW_SL_CTL_FULLDUPLEX;
/* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100)
mac_control |= CPSW_SL_CTL_IFCTL_A; /* in band mode only works in 10Mbps RGMII mode */ elseif ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
if (priv->rx_pause)
mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
if (priv->tx_pause)
mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
if (mac_control != slave->mac_control)
cpsw_sl_ctl_set(slave->mac_sl, mac_control);
switch (cpsw->version) { case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); /* Increase RX FIFO size to 5 for supporting fullduplex * flow control mode
*/
slave_write(slave,
(CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); break; case CPSW_VERSION_2: case CPSW_VERSION_3: case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); /* Increase RX FIFO size to 5 for supporting fullduplex * flow control mode
*/
slave_write(slave,
(CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); break;
}
/* setup max packet size, and mac address */
cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
cpsw->rx_packet_max);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
if (cpsw_is_switch_en(cpsw))
cpsw_port_add_switch_def_ale_entries(priv, slave); else
cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
if (!slave->data->phy_node)
dev_err(priv->dev, "no phy found on slave %d\n",
slave->slave_num);
phy = of_phy_connect(priv->ndev, slave->data->phy_node,
&cpsw_adjust_link, 0, slave->data->phy_if); if (!phy) {
dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
slave->data->phy_node,
slave->slave_num); return;
}
dev_info(priv->dev, "starting ndev. mode: %s\n",
cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
ret = pm_runtime_resume_and_get(cpsw->dev); if (ret < 0) return ret;
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); if (ret) {
dev_err(priv->dev, "cannot set real number of tx queues\n"); goto pm_cleanup;
}
ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); if (ret) {
dev_err(priv->dev, "cannot set real number of rx queues\n"); goto pm_cleanup;
}
/* Initialize host and slave ports */ if (!cpsw->usage_count)
cpsw_init_host_port(priv);
cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
/* initialize shared resources for every ndev */ if (!cpsw->usage_count) { /* create rxqs for both infs in dual mac as they use same pool * and must be destroyed together when no users.
*/
ret = cpsw_create_xdp_rxqs(cpsw); if (ret < 0) goto err_cleanup;
ret = cpsw_fill_rx_channels(priv); if (ret < 0) goto err_cleanup;
if (cpsw->cpts) { if (cpts_register(cpsw->cpts))
dev_err(priv->dev, "error registering cpts device\n"); else
writel(0x10, &cpsw->wr_regs->misc_en);
}
/* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames.
*/ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
netif_tx_stop_queue(txq);
/* Barrier, so that stop_queue visible to other cpus */
smp_mb__after_atomic();
if (cpdma_check_free_tx_desc(txch))
netif_tx_wake_queue(txq);
}
/* cpsw_ndo_vlan_rx_kill_vid - .ndo_vlan_rx_kill_vid handler.
 * @ndev: net device the VLAN is removed from
 * @proto: VLAN protocol (unused here, required by the ndo signature)
 * @vid: VLAN id to remove from the ALE
 *
 * Removes the VLAN, its unicast/multicast ALE entries, and flushes
 * remaining multicast entries for the host port.  No-op in switch mode,
 * for the default VLAN, and refused (-EINVAL) for a slave's reserved
 * port VLAN, which must stay intact for dual-EMAC port separation.
 */
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	int i;

	if (cpsw_is_switch_en(cpsw)) {
		dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
		return 0;
	}

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	/* reset the return code as pm_runtime_get_sync() can return
	 * non zero values as well.
	 */
	ret = 0;
	for (i = 0; i < cpsw->data.slaves; i++) {
		if (cpsw->slaves[i].ndev &&
		    vid == cpsw->slaves[i].port_vlan) {
			ret = -EINVAL;
			goto err;
		}
	}

	dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
			ret);
	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				 0, ALE_VLAN, vid);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
			ret);
	cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
	ret = 0;
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
/* Populate all the child nodes here...
*/
ret = devm_of_platform_populate(dev); /* We do not want to force this, as in some cases may not have child */ if (ret)
dev_warn(dev, "Doesn't have any child node\n");
if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
dev_err(dev, "%pOF has invalid port_id %u\n",
port_np, port_id);
ret = -EINVAL; goto err_node_put;
}
slave_data = &data->slave_data[port_id - 1];
slave_data->disabled = !of_device_is_available(port_np); if (slave_data->disabled) continue;
slave_data->slave_node = port_np;
slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL); if (IS_ERR(slave_data->ifphy)) {
ret = PTR_ERR(slave_data->ifphy);
dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
port_np, ret); goto err_node_put;
}
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np); if (ret) {
dev_err_probe(dev, ret, "%pOF failed to register fixed-link phy\n",
port_np); goto err_node_put;
}
slave_data->phy_node = of_node_get(port_np);
} else {
slave_data->phy_node =
of_parse_phandle(port_np, "phy-handle", 0);
}
if (!slave_data->phy_node) {
dev_err(dev, "%pOF no phy found\n", port_np);
ret = -ENODEV; goto err_node_put;
}
ret = of_get_phy_mode(port_np, &slave_data->phy_if); if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret); goto err_node_put;
}
ret = of_get_mac_address(port_np, slave_data->mac_addr); if (ret) {
ret = ti_cm_get_macid(dev, port_id - 1,
slave_data->mac_addr); if (ret) goto err_node_put;
}
if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
&prop)) {
dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
port_np);
slave_data->dual_emac_res_vlan = port_id;
dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
port_np, slave_data->dual_emac_res_vlan);
} else {
slave_data->dual_emac_res_vlan = prop;
}
}
if (!napi_ndev) { /* CPSW Host port CPDMA interface is shared between * ports and there is only one TX and one RX IRQs * available for all possible TX and RX channels * accordingly.
*/
netif_napi_add(ndev, &cpsw->napi_rx,
cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
netif_napi_add_tx(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ?
cpsw_tx_poll : cpsw_tx_mq_poll);
}
napi_ndev = ndev;
}
return ret;
}
/* cpsw_unregister_ports - unregister every slave net device that was
 * successfully registered.  Slots with a NULL ndev (never created or
 * failed registration) are skipped.
 */
static void cpsw_unregister_ports(struct cpsw_common *cpsw)
{
	int i = 0;

	for (i = 0; i < cpsw->data.slaves; i++) {
		if (!cpsw->slaves[i].ndev)
			continue;

		unregister_netdev(cpsw->slaves[i].ndev);
	}
}
/* cpsw_register_ports - register all created slave net devices.
 *
 * On the first failure, the failed slot is cleared (so cleanup skips
 * it) and every device registered so far is unregistered again.
 *
 * Return: 0 on success, register_netdev() error code otherwise.
 */
static int cpsw_register_ports(struct cpsw_common *cpsw)
{
	int ret = 0, i = 0;

	for (i = 0; i < cpsw->data.slaves; i++) {
		if (!cpsw->slaves[i].ndev)
			continue;

		/* register the network device */
		ret = register_netdev(cpsw->slaves[i].ndev);
		if (ret) {
			dev_err(cpsw->dev,
				"cpsw: err registering net device%d\n", i);
			cpsw->slaves[i].ndev = NULL;
			break;
		}
	}

	if (ret)
		cpsw_unregister_ports(cpsw);
	return ret;
}
if (!cpsw->br_members) {
cpsw->hw_bridge_dev = br_ndev;
} else { /* This is adding the port to a second bridge, this is * unsupported
*/ if (cpsw->hw_bridge_dev != br_ndev) return -EOPNOTSUPP;
}
if (id != CPSW_DL_PARAM_SWITCH_MODE) return -EOPNOTSUPP;
if (switch_en == !cpsw->data.dual_emac) return 0;
if (!switch_en && cpsw->br_members) {
dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n"); return -EINVAL;
}
rtnl_lock();
for (i = 0; i < cpsw->data.slaves; i++) { struct cpsw_slave *slave = &cpsw->slaves[i]; struct net_device *sl_ndev = slave->ndev;
if (!sl_ndev || !netif_running(sl_ndev)) continue;
if_running = true;
}
if (!if_running) { /* all ndevs are down */
cpsw->data.dual_emac = !switch_en; for (i = 0; i < cpsw->data.slaves; i++) { struct cpsw_slave *slave = &cpsw->slaves[i]; struct net_device *sl_ndev = slave->ndev;
mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); if (IS_ERR(mode)) {
ret = PTR_ERR(mode);
dev_err(dev, "gpio request failed, ret %d\n", ret); return ret;
}
clk = devm_clk_get(dev, "fck"); if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev, "fck is not found %d\n", ret); return ret;
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res); if (IS_ERR(ss_regs)) {
ret = PTR_ERR(ss_regs); return ret;
}
cpsw->regs = ss_regs;
platform_set_drvdata(pdev, cpsw); /* This may be required here for child devices. */
pm_runtime_enable(dev);
/* Need to enable clocks with runtime PM api to access module * registers
*/
ret = pm_runtime_resume_and_get(dev); if (ret < 0) {
pm_runtime_disable(dev); return ret;
}
ret = cpsw_probe_dt(cpsw); if (ret) goto clean_dt_ret;
soc = soc_device_match(cpsw_soc_devices); if (soc)
cpsw->quirk_irq = true;
/* setup netdevs */
ret = cpsw_create_ports(cpsw); if (ret) goto clean_unregister_netdev;
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and * MISC IRQs which are always kept disabled with this driver so * we will not request them. * * If anyone wants to implement support for those, make sure to * first request and append them to irqs_table array.
*/
ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { /* Note, if this error path is taken, we're leaking some * resources.
*/
dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
ERR_PTR(ret)); return;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.