if ((offset + size) > MAX_WAKE_FILTER_SIZE) return -EINVAL;
while (size--) { /* The HW only accepts 4 byte aligned writes, so if we * begin unaligned or if remaining bytes less than 4, * we need to read then write to avoid losing current * register state
*/ if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
match_val = bcmasp_netfilt_rd(priv, nfilt,
ASP_NETFILT_MATCH,
ALIGN_DOWN(offset, 4));
mask_val = bcmasp_netfilt_rd(priv, nfilt,
ASP_NETFILT_MASK,
ALIGN_DOWN(offset, 4));
}
/* Write all filters to HW */ for (i = 0; i < priv->num_net_filters; i++) { /* If the filter does not match the port, skip programming. */ if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port) continue;
if (i > 0 && (i % 2) &&
priv->net_filters[i].wake_filter &&
priv->net_filters[i - 1].wake_filter) continue;
ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]); if (!ret)
write = true;
}
/* Successfully programmed at least one wake filter * so enable top level wake config
*/ if (write)
rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
ASP_RX_FILTER_LNR_MD |
ASP_RX_FILTER_GEN_WK_EN |
ASP_RX_FILTER_NT_FLT_EN),
ASP_RX_FILTER_BLK_CTRL);
}
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt)
{ struct bcmasp_priv *priv = intf->parent; int j = 0, i;
for (i = 0; i < priv->num_net_filters; i++) { if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port) continue;
if (i > 0 && (i % 2) &&
priv->net_filters[i].wake_filter &&
priv->net_filters[i - 1].wake_filter) continue;
/* If no network filter found, return open filter. * If no more open filters return NULL
*/ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
u32 loc, bool wake_filter, bool init)
{ struct bcmasp_net_filter *nfilter = NULL; struct bcmasp_priv *priv = intf->parent; int i, open_index = -1;
/* Check whether we exceed the filter table capacity */ if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters) return ERR_PTR(-EINVAL);
/* If the filter location is busy (already claimed) and we are initializing * the filter (insertion), return a busy error code.
*/ if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed) return ERR_PTR(-EBUSY);
/* We need two filters for wake-up, so we cannot use an odd filter */ if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2)) return ERR_PTR(-EINVAL);
/* Initialize the loop index based on the desired location or from 0 */
i = loc == RX_CLS_LOC_ANY ? 0 : loc;
for ( ; i < priv->num_net_filters; i++) { /* Found matching network filter */ if (!init &&
priv->net_filters[i].claimed &&
priv->net_filters[i].hw_index == i &&
priv->net_filters[i].port == intf->port) return &priv->net_filters[i];
/* If we don't need a new filter or new filter already found */ if (!init || open_index >= 0) continue;
/* Wake filter conslidates two filters to cover more bytes * Wake filter is open if... * 1. It is an even filter * 2. The current and next filter is not claimed
*/ if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
!priv->net_filters[i + 1].claimed)
open_index = i; elseif (!priv->net_filters[i].claimed)
open_index = i;
}
/* There are 32 MDA filters shared between all ports, we reserve 4 filters per * port for the following. * - Promisc: Filter to allow all packets when promisc is enabled * - All Multicast * - Broadcast * - Own address * * The reserved filters are identified as so. * - Promisc: (index * 4) + 0 * - All Multicast: (index * 4) + 1 * - Broadcast: (index * 4) + 2 * - Own address: (index * 4) + 3
*/ enum asp_rx_filter_id {
ASP_RX_FILTER_MDA_PROMISC = 0,
ASP_RX_FILTER_MDA_ALLMULTI,
ASP_RX_FILTER_MDA_BROADCAST,
ASP_RX_FILTER_MDA_OWN_ADDR,
ASP_RX_FILTER_MDA_RES_MAX,
};
eth_zero_addr(promisc); /* Set mask to 00:00:00:00:00:00 to match all packets */
bcmasp_set_mda_filter(intf, promisc, promisc, i);
bcmasp_en_mda_filter(intf, en, i);
}
/* Disable all filters held by this port */ for (i = res_count; i < priv->num_mda_filters; i++) { if (priv->mda_filters[i].en &&
priv->mda_filters[i].port == intf->port)
bcmasp_en_mda_filter(intf, 0, i);
}
}
/* Switch to u64 to help with the calculations */
addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
addr2 = ether_addr_to_u64(addr);
mask2 = ether_addr_to_u64(mask);
/* Check if one filter resides within the other */
mask3 = mask1 & mask2; if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) { /* Filter 2 resides within filter 1, so everything is good */ return 0;
} elseif (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) { /* Filter 1 resides within filter 2, so swap filters */
bcmasp_set_mda_filter(intf, addr, mask, i); return 0;
}
/* Unable to combine */ return -EINVAL;
}
int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsignedchar *addr, unsignedchar *mask)
{ struct bcmasp_priv *priv = intf->parent; int ret, res_count; unsignedint i;
for (i = res_count; i < priv->num_mda_filters; i++) { /* If filter not enabled or belongs to another port skip */ if (!priv->mda_filters[i].en ||
priv->mda_filters[i].port != intf->port) continue;
/* Attempt to combine filters */
ret = bcmasp_combine_set_filter(intf, addr, mask, i); if (!ret) {
intf->mib.filters_combine_cnt++; return 0;
}
}
/* Create new filter if possible */ for (i = res_count; i < priv->num_mda_filters; i++) { if (priv->mda_filters[i].en) continue;
/* Disable all filters and reset software view since the HW * can lose context while in deep sleep suspend states
*/ for (i = 0; i < priv->num_mda_filters; i++) {
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
priv->mda_filters[i].en = 0;
}
for (i = 0; i < priv->num_net_filters; i++)
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
/* Top level filter enable bit should be enabled at all times, set * GEN_WAKE_CLEAR to clear the network filter wake-up which would * otherwise be sticky
*/
rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
ASP_RX_FILTER_MDA_EN |
ASP_RX_FILTER_GEN_WK_CLR |
ASP_RX_FILTER_NT_FLT_EN),
ASP_RX_FILTER_BLK_CTRL);
}
/* When enabling an interface, if the RX or TX clocks were not enabled, * enable them. Conversely, while disabling an interface, if this is * the last one enabled, we can turn off the shared RX and TX clocks as * well. We control enable bits which is why we test for equality on * the RGMII clock bit mask.
*/
spin_lock_irqsave(&priv->clk_lock, flags); if (en) {
intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
bcmasp_core_clock_set_ll(priv, intf_mask, 0);
} else {
reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask; if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) ==
ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK)
intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
bcmasp_core_clock_set_ll(priv, 0, intf_mask);
}
spin_unlock_irqrestore(&priv->clk_lock, flags);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM;
priv->irq = platform_get_irq(pdev, 0); if (priv->irq <= 0) return -EINVAL;
priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp"); if (IS_ERR(priv->clk)) return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to request clock\n");
/* Base from parent node */
priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n");
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); if (ret) return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret);
ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
pdev->name, priv); if (ret) return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
/* ASP specific initialization, Needs to be done regardless of * how many interfaces come up.
*/
bcmasp_core_init(priv);
priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters, sizeof(*priv->mda_filters), GFP_KERNEL); if (!priv->mda_filters) return -ENOMEM;
priv->net_filters = devm_kcalloc(dev, priv->num_net_filters, sizeof(*priv->net_filters), GFP_KERNEL); if (!priv->net_filters) return -ENOMEM;
bcmasp_core_init_filters(priv);
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports"); if (!ports_node) {
dev_warn(dev, "No ports found\n"); return -EINVAL;
}
i = 0;
for_each_available_child_of_node_scoped(ports_node, intf_node) {
intf = bcmasp_interface_create(priv, intf_node, i); if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
ret = -ENOMEM; goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
i++;
}
/* Check and enable WoL */
bcmasp_init_wol(priv);
/* Drop the clock reference count now and let ndo_open()/ndo_close() * manage it for us from now on.
*/
bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
clk_disable_unprepare(priv->clk);
/* Now do the registration of the network ports which will take care * of managing the clock properly.
*/
list_for_each_entry(intf, &priv->intfs, list) {
ret = register_netdev(intf->ndev); if (ret) {
netdev_err(intf->ndev, "failed to register net_device: %d\n", ret);
bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv); goto of_put_exit;
}
count++;
}
list_for_each_entry(intf, &priv->intfs, list) {
ret = bcmasp_interface_suspend(intf); if (ret) break;
}
ret = clk_prepare_enable(priv->clk); if (ret) return ret;
/* Whether Wake-on-LAN is enabled or not, we can always disable * the shared TX clock
*/
bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.