/* Return the number of active ports, not counting the IMP (CPU) port.
 *
 * Walks every port of @ds, skips the CPU (IMP) port, and counts the ports
 * whose software state is flagged enabled in priv->port_sts[].
 *
 * Note: the original extraction of this function was truncated — it fell
 * off the end without returning @count (undefined behavior for a non-void
 * function); the return statement is restored here.
 */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port, count = 0;

	for (port = 0; port < ds->num_ports; port++) {
		/* The IMP (CPU) port never counts as an active user port */
		if (dsa_is_cpu_port(ds, port))
			continue;
		if (priv->port_sts[port].enabled)
			count++;
	}

	return count;
}
/* NOTE(review): orphaned fragment — this is the tail of what looks like a
 * clock recalculation helper (it calls bcm_sf2_num_active_ports() and
 * clk_set_rate() on priv->clk_mdiv); the function header is not visible in
 * this chunk and must be restored from the full file.
 */
ports_active = bcm_sf2_num_active_ports(ds); if (ports_active == 0 || !priv->clk_mdiv) return;
/* If we overflow our table, just use the recommended operational * frequency
 */ if (ports_active > ARRAY_SIZE(rate_table))
/* More active ports than rate_table entries: fall back to a fixed 90 MHz */
new_rate = 90000000; else
new_rate = rate_table[ports_active - 1];
clk_set_rate(priv->clk_mdiv, new_rate);
}
/* NOTE(review): orphaned fragment — IMP (CPU) port setup statements with no
 * visible enclosing function header in this chunk.
 */
/* Enable IMP port in dumb mode */
reg = core_readl(priv, CORE_SWITCH_CTRL);
reg |= MII_DUMB_FWDG_EN;
core_writel(priv, reg, CORE_SWITCH_CTRL);
/* Configure Traffic Class to QoS mapping, allow each priority to map * to a different queue number
 */
/* Identity map: traffic class i is steered to egress queue i */
reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port)); for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
reg |= i << (PRT_TO_QID_SHIFT * i);
core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
/* Unmask (enable) the interrupt sources for @port.
 *
 * Port 0 interrupts live in the first interrupt bank (intrl2_0); port 7
 * and all remaining ports live in the second bank (intrl2_1) at an offset
 * derived from the port number.
 *
 * Note: the original extraction fused "static inline void" into a single
 * token, which does not compile; the storage-class specifiers are restored
 * here with no behavioral change.
 */
static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}
/* Mask (disable) and acknowledge the interrupt sources for @port.
 *
 * Mirror of bcm_sf2_port_intr_enable(): port 0 is handled on the first
 * interrupt bank, every other port on the second bank.  In addition to
 * masking, pending interrupts are cleared via INTRL2_CPU_CLEAR.
 *
 * Note: the original extraction was truncated after the switch statement —
 * the computed @off was never used and the function had no closing tail.
 * The bank-1 mask/clear pair, symmetric with the port-0 branch and with
 * the enable path, is restored here.  "static inline void" was also fused
 * into one token and is de-fused.
 */
static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}
/* NOTE(review): orphaned fragment — per-port enable statements (port memory
 * power-up, optional Broadcom tag, TC-to-QoS map, GPHY workaround, pause and
 * ACB thresholds); the enclosing function header is not visible in this
 * chunk.  A second fragment that looks like the matching port-disable path
 * starts at the "Disable learning while in WoL mode" comment further down.
 */
/* Clear the memory power down */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
/* Enable Broadcom tags for that port if requested */ if (priv->brcm_tag_mask & BIT(port))
b53_brcm_hdr_setup(ds, port);
/* Configure Traffic Class to QoS mapping, allow each priority to map * to a different queue number
 */
reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port)); for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
reg |= i << (PRT_TO_QID_SHIFT * i);
core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
/* Re-enable the GPHY and re-apply workarounds */ if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
bcm_sf2_gphy_enable_set(ds, true); if (phy) { /* if phy_stop() has been called before, phy * will be in halted state, and phy_start() * will call resume. * * the resume path does not configure back * autoneg settings, and since we hard reset * the phy manually here, we need to reset the * state machine also.
*/
phy->state = PHY_READY;
phy_init_hw(phy);
}
}
/* Enable MoCA port interrupts to get notified */ if (port == priv->moca_port)
bcm_sf2_port_intr_enable(priv, port);
/* Set per-queue pause threshold to 32 */
core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
/* Set ACB threshold to 24 */ for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
reg = acb_readl(priv, ACB_QUEUE_CFG(port *
SF2_NUM_EGRESS_QUEUES + i));
reg &= ~XOFF_THRESHOLD_MASK;
reg |= 24;
acb_writel(priv, reg, ACB_QUEUE_CFG(port *
SF2_NUM_EGRESS_QUEUES + i));
}
/* NOTE(review): fragment boundary — the statements below appear to belong
 * to a separate port-disable path (they disable learning for WoL ports,
 * power the port memory down and mark the port disabled); its function
 * header is also missing from this chunk.
 */
/* Disable learning while in WoL mode */ if (priv->wol_ports_mask & (1 << port)) {
reg = core_readl(priv, CORE_DIS_LEARN);
reg |= BIT(port);
core_writel(priv, reg, CORE_DIS_LEARN); return;
}
if (port == priv->moca_port)
bcm_sf2_port_intr_disable(priv, port);
if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, false);
b53_disable_port(ds, port);
/* Power down the port memory */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg |= P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
priv->port_sts[port].enabled = false;
bcm_sf2_recalc_clock(ds);
}
/* NOTE(review): truncated definition — bcm_sf2_sw_indir_rw() opens here but
 * its body is cut off after the first two declarations; what follows are
 * unrelated fragments (a 7278 external-reset sequence, device-tree port
 * parsing, pseudo-PHY diversion setup, and a PHY-flags return) whose own
 * headers are missing.  Also note the fused "staticint" token below: the
 * extraction dropped the space in "static int".
 */
staticint bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr, int regnum, u16 val)
{ int ret = 0;
u32 reg;
/* The watchdog reset does not work on 7278, we need to hit the * "external" reset line through the reset controller.
*/ if (priv->type == BCM7278_DEVICE_ID) {
ret = reset_control_assert(priv->rcdev); if (ret) return ret;
/* NOTE(review): fragment — walks the switch's DT port children, recording
 * phy-mode, MoCA port and Broadcom-header usage per port; presumably part
 * of the MDIO/OF setup path — confirm against the full file.
 */
for_each_available_child_of_node(dn, port) { if (of_property_read_u32(port, "reg", &port_num)) continue;
if (port_num >= DSA_MAX_PORTS) {
dev_err(dev, "Invalid port number %d\n", port_num); continue;
}
port_st = &priv->port_sts[port_num];
/* Internal PHYs get assigned a specific 'phy-mode' property * value: "internal" to help flag them before MDIO probing * has completed, since they might be turned off at that * time
*/
err = of_get_phy_mode(port, &port_st->mode); if (err) continue;
if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL)
priv->int_phy_mask |= 1 << port_num;
if (port_st->mode == PHY_INTERFACE_MODE_MOCA)
priv->moca_port = port_num;
if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
priv->brcm_tag_mask |= 1 << port_num;
/* Ensure that port 5 is not picked up as a DSA CPU port * flavour but a regular port instead. We should be using * devlink to be able to set the port flavour.
*/ if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
prop = of_find_property(port, "ethernet", NULL); if (prop)
of_remove_property(port, prop);
}
}
}
/* Include the pseudo-PHY address to divert reads towards our * workaround. This is only required for 7445D0, since 7445E0 * disconnects the internal switch pseudo-PHY such that we can use the * regular SWITCH_MDIO master controller instead. * * Here we flag the pseudo PHY as needing special treatment and would * otherwise make all other PHY read/writes go to the master MDIO bus * controller that comes with this switch backed by the "mdio-unimac" * driver.
*/ if (of_machine_is_compatible("brcm,bcm7445d0"))
priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0); else
priv->indir_phy_mask = 0;
/* We need to make sure that of_phy_connect() will not work by * removing the 'phandle' and 'linux,phandle' properties and * unregister the existing PHY device that was already registered.
*/
/* NOTE(review): the "®" character on the next line is mis-encoded text —
 * almost certainly "&reg" damaged via the HTML entity "&reg;"; it must be
 * restored for this to compile.
 */
for_each_available_child_of_node(dn, child) { if (of_property_read_u32(child, "reg", ®) ||
reg >= PHY_MAX_ADDR) continue;
if (!(priv->indir_phy_mask & BIT(reg))) continue;
prop = of_find_property(child, "phandle", NULL); if (prop)
of_remove_property(child, prop);
prop = of_find_property(child, "linux,phandle", NULL); if (prop)
of_remove_property(child, prop);
phydev = of_phy_find_device(child); if (phydev) {
phy_device_remove(phydev);
phy_device_free(phydev);
}
}
err = mdiobus_register(priv->user_mii_bus); if (err) goto err_free_user_mii_bus;
/* NOTE(review): fragment — tail of what looks like a get-PHY-flags helper
 * returning either the integrated GPHY revision or Broadcom PHY flags.
 */
/* The BCM7xxx PHY driver expects to find the integrated PHY revision * in bits 15:8 and the patch level in bits 7:0 which is exactly what * the REG_PHY_REVISION register layout is.
*/ if (priv->int_phy_mask & BIT(port)) return priv->hw_params.gphy_rev; else return PHY_BRCM_AUTO_PWRDWN_ENABLE |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_IDDQ_SUSPEND;
}
/* NOTE(review): orphaned fragments — RGMII control-register configuration
 * and link-up/down handling, followed by a fixed-link state callback tail;
 * the enclosing function headers are not visible in this chunk.
 */
/* Clear id_mode_dis bit, and the existing port mode, let
 * RGMII_MODE_EN be set by mac_link_{up,down}
 */
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~ID_MODE_DIS;
reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
reg |= port_mode; if (id_mode_dis)
reg |= ID_MODE_DIS;
/* If the link is down, just disable the interface to conserve power */
reg = reg_readl(priv, reg_rgmii_ctrl); if (link)
reg |= RGMII_MODE_EN; else
reg &= ~RGMII_MODE_EN;
reg_writel(priv, reg, reg_rgmii_ctrl);
}
/* MoCA port is special as we do not get link status from CORE_LNKSTS, * which means that we need to force the link at the port override * level to get the data to flow. We do use what the interrupt handler * did determine before. * * For the other ports, we just force the link status, since this is * a fixed PHY device.
*/ if (port == priv->moca_port) {
status->link = priv->port_sts[port].link; /* For MoCA interfaces, also force a link down notification * since some version of the user-space daemon (mocad) use * cmd->autoneg to force the link, which messes up the PHY * state machine and make it go in PHY_FORCING state instead.
*/ if (!status->link)
netif_carrier_off(dsa_to_port(ds, port)->user);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
}
}
/* NOTE(review): orphaned fragments — a suspend-style loop disabling all
 * present ports, followed by Wake-on-LAN port-mask bookkeeping for the CPU
 * port; the enclosing function headers are not visible in this chunk.
 */
/* Disable all ports physically present including the IMP * port, the other ones have already been disabled during * bcm_sf2_sw_setup
*/ for (port = 0; port < ds->num_ports; port++) { if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
bcm_sf2_port_disable(ds, port);
}
/* No WoL port armed: the switch clock can be gated entirely */
if (!priv->wol_ports_mask)
clk_disable_unprepare(priv->clk);
/* If we have at least one port enabled, make sure the CPU port * is also enabled. If the CPU port is the last one enabled, we disable * it since this configuration does not make sense.
*/ if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
priv->wol_ports_mask |= (1 << cpu_port); else
priv->wol_ports_mask &= ~(1 << cpu_port);
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */ #define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2)
/* NOTE(review): orphaned fragment — the middle of a platform probe routine
 * (reset controller, register mapping, clocks, software reset, MDIO, CFP,
 * IRQs, port/GPHY discovery); the function header, local declarations and
 * the error-unwind labels (out_clk, out_clk_mdiv, out_mdio) referenced by
 * the gotos below are outside this chunk.
 */
priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev, "switch"); if (IS_ERR(priv->rcdev)) return PTR_ERR(priv->rcdev);
/* Auto-detection using standard registers will not work, so * provide an indication of what kind of device we are for * b53_common to work with
*/
pdata->chip_id = priv->type;
dev->pdata = pdata;
/* Map every register resource declared for this device */
base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
*base = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(*base)) {
pr_err("unable to find register: %s\n", reg_names[i]); return PTR_ERR(*base);
}
base++;
}
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch"); if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk);
ret = clk_prepare_enable(priv->clk); if (ret) return ret;
priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv"); if (IS_ERR(priv->clk_mdiv)) {
ret = PTR_ERR(priv->clk_mdiv); goto out_clk;
}
ret = clk_prepare_enable(priv->clk_mdiv); if (ret) goto out_clk;
ret = bcm_sf2_sw_rst(priv); if (ret) {
pr_err("unable to software reset switch: %d\n", ret); goto out_clk_mdiv;
}
bcm_sf2_crossbar_setup(priv);
/* GPHY must be powered for the MDIO bus scan to find internal PHYs */
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
ret = bcm_sf2_mdio_register(ds); if (ret) {
pr_err("failed to register MDIO bus\n"); goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, false);
ret = bcm_sf2_cfp_rst(priv); if (ret) {
pr_err("failed to reset CFP\n"); goto out_mdio;
}
/* Disable all interrupts and request them */
bcm_sf2_intr_disable(priv);
ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0, "switch_0", ds); if (ret < 0) {
pr_err("failed to request switch_0 IRQ\n"); goto out_mdio;
}
ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0, "switch_1", ds); if (ret < 0) {
pr_err("failed to request switch_1 IRQ\n"); goto out_mdio;
}
/* Get the maximum number of ports for this switch */
priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; if (priv->hw_params.num_ports > DSA_MAX_PORTS)
priv->hw_params.num_ports = DSA_MAX_PORTS;
/* Assume a single GPHY setup if we can't read that property */ if (of_property_read_u32(dn, "brcm,num-gphy",
&priv->hw_params.num_gphy))
priv->hw_params.num_gphy = 1;
/* For a kernel about to be kexec'd we want to keep the GPHY on for a * successful MDIO bus scan to occur. If we did turn off the GPHY * before (e.g: port_disable), this will also power it back on. * * Do not rely on kexec_in_progress, just power the PHY on.
*/ if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.