/* The read-ahead cache present in the Brahma-B53 CPU is a special piece of * hardware after the integrated L2 cache of the B53 CPU complex whose purpose * is to prefetch instruction and/or data with a line size of either 64 bytes * or 256 bytes. The rationale is that the data-bus of the CPU interface is * optimized for 256-byte transactions, and enabling the read-ahead cache * provides a significant performance boost (typically twice the performance * for a memcpy benchmark application). * * The read-ahead cache is transparent for Virtual Address cache maintenance * operations: IC IVAU, DC IVAC, DC CVAC, DC CVAU and DC CIVAC. So no special * handling is needed for the DMA API above and beyond what is included in the * arm64 implementation. * * In addition, since the Point of Unification is typically between L1 and L2 * for the Brahma-B53 processor no special read-ahead cache handling is needed * for the IC IALLU and IC IALLUIS cache maintenance operations. * * However, it is not possible to specify the cache level (L3) for the cache * maintenance instructions operating by set/way to operate on the read-ahead * cache. The read-ahead cache will maintain coherency when inner cache lines * are cleaned by set/way, but if it is necessary to invalidate inner cache * lines by set/way to maintain coherency with system masters operating on * shared memory that does not have hardware support for coherency, then it * will also be necessary to explicitly invalidate the read-ahead cache.
*/

/*
 * a72_b53_rac_enable_all() - tune the MCP interfaces on A72/B53 platforms.
 * @np: device node of the CPU BIU control block (currently unused here;
 *      kept for interface compatibility with the caller).
 *
 * Bails out early when the dedicated B15 RAC driver is built in (it owns
 * this hardware) or when more than 4 CPUs are present.  On a recognized
 * A72/B53 platform it sets all three MCP interfaces to 8 read/write
 * credits and maxes out the in-flight Jword read buffer credits.
 */
static void __init a72_b53_rac_enable_all(struct device_node *np)
{
	unsigned int i;
	u32 reg;

	/* The CACHE_B15_RAC driver manages the read-ahead cache itself;
	 * avoid programming the same hardware twice.
	 */
	if (IS_ENABLED(CONFIG_CACHE_B15_RAC))
		return;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		return;

	/* Only proceed on known A72/B53 chip families.
	 * NOTE(review): the original read 'reg' uninitialized here; it must
	 * hold the chip family ID before BRCM_ID() is applied — TODO confirm
	 * brcmstb_get_family_id() is the intended source.
	 */
	reg = brcmstb_get_family_id();
	for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
		if (BRCM_ID(reg) == a72_b53_mach_compat[i])
			break;
	}
	if (i == ARRAY_SIZE(a72_b53_mach_compat))
		return;

	/* Set all 3 MCP interfaces to 8 credits */
	reg = cbc_readl(CPU_CREDIT_REG);
	for (i = 0; i < 3; i++) {
		reg &= ~(CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK <<
			 CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i));
		reg &= ~(CPU_CREDIT_REG_MCPx_READ_CRED_MASK <<
			 CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i));
		reg |= 8 << CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i);
		reg |= 8 << CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i);
	}
	cbc_writel(reg, CPU_CREDIT_REG);

	/* Max out the number of in-flight Jwords reads on the MCP interface */
	reg = cbc_readl(CPU_MCP_FLOW_REG);
	for (i = 0; i < 3; i++)
		reg |= CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK <<
		       CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(i);
	cbc_writel(reg, CPU_MCP_FLOW_REG);
}
/*
 * brcmstb_biuctrl_init() - early init of the Broadcom STB CPU BIU control
 * block.
 *
 * Looks up the "brcm,brcmstb-cpu-biu-ctrl" node, maps its registers via
 * setup_hifcpubiuctrl_regs() and disables MCP write pairing.
 *
 * Return: 0 when the node is absent (multi-platform kernel — not an error),
 * 0 on success, or a negative errno propagated from the helpers.
 */
static int __init brcmstb_biuctrl_init(void)
{
	struct device_node *np;
	int ret;

	/* We might be running on a multi-platform kernel, don't make this a
	 * fatal error, just bail out early
	 */
	np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!np)
		return 0;

	ret = setup_hifcpubiuctrl_regs(np);
	if (ret)
		goto out_put;

	ret = mcp_write_pairing_set();
	if (ret) {
		pr_err("MCP: Unable to disable write pairing!\n");
		goto out_put;
	}

	/* NOTE(review): the original chunk was truncated after the write
	 * pairing check; this success path and the out_put epilogue were
	 * reconstructed — the of_node_put() balances the
	 * of_find_compatible_node() reference above. Confirm against the
	 * rest of the file.
	 */
	a72_b53_rac_enable_all(np);
	ret = 0;
out_put:
	of_node_put(np);
	return ret;
}
/*
 * NOTE(review): the following trailing text is boilerplate from an unrelated
 * German web page, not C code; it is preserved here as a comment so the file
 * remains compilable. Translation: "The information on this website was
 * compiled carefully to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information is
 * guaranteed. Remark: the colored syntax display and the measurement are
 * still experimental."
 */