mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
CM_GCR_Cx_OTHER_BLOCK_LOCAL);
stat = read_cpc_co_stat_conf();
mips_cm_unlock_other();
seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5) return;
/* Set endianness & power up the CM */
mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
write_cpc_redir_pwrup_ctl(1);
mips_cm_unlock_other();
/* Wait for the CM to start up */
timeout = 1000;
mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
CM_GCR_Cx_OTHER_BLOCK_LOCAL); while (1) {
stat = read_cpc_co_stat_conf();
seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5) break;
if (timeout) {
mdelay(1);
timeout--;
} else {
pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
cluster, stat);
mdelay(1000);
}
}
/* Make sure no prefetched data in cache */
blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
__sync();
}
if (mips_cm_revision() >= CM_REV_CM3_5)
power_up_other_cluster(cl);
ncores = mips_cps_numcores(cl); for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(cl, c);
if (c > 0)
pr_cont(",");
pr_cont("%u", core_vpes);
/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ if (!cl && !c)
smp_num_siblings = core_vpes;
cpumask_set_cpu(nvpes, &__cpu_primary_thread_mask);
for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
cpu_set_cluster(&cpu_data[nvpes + v], cl);
cpu_set_core(&cpu_data[nvpes + v], c);
cpu_set_vpe_id(&cpu_data[nvpes + v], v);
}
nvpes += core_vpes;
}
pr_cont("}");
}
pr_cont(" total %u\n", nvpes);
/* Indicate present CPUs (CPU being synonymous with VPE) */ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
set_cpu_possible(v, true);
set_cpu_present(v, true);
__cpu_number_map[v] = v;
__cpu_logical_map[v] = v;
}
/* Set a coherent default CCA (CWB) */
change_c0_config(CONF_CM_CMASK, 0x5);
/* Initialise core 0 */
mips_cps_core_init();
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
if (allocate_cps_vecs())
pr_err("Failed to allocate CPS vectors\n");
if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
write_gcr_bev_base(core_entry_reg);
#ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu)
cpumask_set_cpu(0, &mt_fpu_cpumask); #endif/* CONFIG_MIPS_MT_FPAFF */
}
unsignedlong calibrate_delay_is_known(void)
{ int first_cpu_cluster = 0;
/* The calibration has to be done on the primary CPU of the cluster */ if (mips_cps_first_online_in_cluster(&first_cpu_cluster)) return 0;
/* Detect whether the CCA is unsuited to multi-core SMP */
cca = read_c0_config() & CONF_CM_CMASK; switch (cca) { case 0x4: /* CWBE */ case 0x5: /* CWB */ /* The CCA is coherent, multi-core is fine */
cca_unsuitable = false; break;
default: /* CCA is not coherent, multi-core is not usable */
cca_unsuitable = true;
}
/* Warn the user if the CCA prevents multi-core */
cores_limited = false; if (cca_unsuitable || cpu_has_dc_aliases) {
for_each_present_cpu(c) { if (cpus_are_siblings(smp_processor_id(), c)) continue;
set_cpu_present(c, false);
cores_limited = true;
}
} if (cores_limited)
pr_warn("Using only one core due to %s%s%s\n",
cca_unsuitable ? "unsuitable CCA" : "",
(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
cpu_has_dc_aliases ? "dcache aliasing" : "");
while (!mips_cm_is_l2_hci_broken) {
l2_cfg = read_gcr_redir_l2_ram_config();
/* If HCI is not supported, use the state machine below */ if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT)) break; if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED)) break;
/* If the HCI_DONE bit is set, we're finished */ if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE) return;
}
l2sm_cop = read_gcr_redir_l2sm_cop(); if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT), "L2 init not supported on this system yet")) return;
/* Clear L2 tag registers */
write_gcr_redir_l2_tag_state(0);
write_gcr_redir_l2_ecc(0);
/* Ensure the L2 tag writes complete before the state machine starts */
mb();
/* Wait for the L2 state machine to be idle */ do {
l2sm_cop = read_gcr_redir_l2sm_cop();
} while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
/* Start a store tag operation */
l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
write_gcr_redir_l2sm_cop(l2sm_cop);
/* Ensure the state machine starts before we poll for completion */
mb();
/* Wait for the operation to be complete */ do {
l2sm_cop = read_gcr_redir_l2sm_cop();
result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
} while (!result);
WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK, "L2 state machine failed cache init with error %u\n", result);
}
/* Ensure cluster GCRs are where we expect */
write_gcr_redir_base(read_gcr_base());
write_gcr_redir_cpc_base(read_gcr_cpc_base());
write_gcr_redir_gic_base(read_gcr_gic_base());
/* Set BEV base */
write_gcr_redir_bev_base(core_entry_reg);
mips_cm_unlock_other();
}
if (cluster != cpu_cluster(¤t_cpu_data)) {
mips_cm_lock_other(cluster, core, 0,
CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
/* Ensure the core can access the GCRs */
access = read_gcr_redir_access();
access |= BIT(core);
write_gcr_redir_access(access);
mips_cm_unlock_other();
} else { /* Ensure the core can access the GCRs */
access = read_gcr_access();
access |= BIT(core);
write_gcr_access(access);
}
/* Select the appropriate core */
mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* Set its reset vector */ if (mips_cm_is64)
write_gcr_co_reset64_base(core_entry_reg); else
write_gcr_co_reset_base(core_entry_reg);
/* Ensure its coherency is disabled */
write_gcr_co_coherence(0);
/* Start it with the legacy memory map and exception base */
write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
/* Ensure the core can access the GCRs */ if (mips_cm_revision() < CM_REV_CM3)
set_gcr_access(1 << core); else
set_gcr_access_cm3(1 << core);
if (mips_cpc_present()) { /* Reset the core */
mips_cpc_lock_other(core);
if (mips_cm_revision() >= CM_REV_CM3) { /* Run only the requested VP following the reset */
write_cpc_co_vp_stop(0xf);
write_cpc_co_vp_run(1 << vpe_id);
/* * Ensure that the VP_RUN register is written before the * core leaves reset.
*/
wmb();
}
write_cpc_co_cmd(CPC_Cx_CMD_RESET);
timeout = 100; while (true) {
stat = read_cpc_co_stat_conf();
seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
/* U6 == coherent execution, ie. the core is up */ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6) break;
/* Delay a little while before we start warning */ if (timeout) {
timeout--;
mdelay(10); continue;
}
pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
core, stat);
mdelay(1000);
}
mips_cpc_unlock_other();
} else { /* Take the core out of reset */
write_gcr_co_reset_release(0);
}
mips_cm_unlock_other();
/* The core is now powered up */
bitmap_set(cluster_cfg->core_power, core, 1);
/* * Restore CM_PWRUP=0 so that the CM can power down if all the cores in * the cluster do (eg. if they're all removed via hotplug.
*/ if (mips_cm_revision() >= CM_REV_CM3_5) {
mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
write_cpc_redir_pwrup_ctl(0);
mips_cm_unlock_other();
}
}
if (!test_bit(core, cluster_cfg->core_power)) { /* Boot a VPE on a powered down core */
boot_core(cluster, core, vpe_id); goto out;
}
if (cpu_has_vp) {
mips_cm_lock_other(cluster, core, vpe_id,
CM_GCR_Cx_OTHER_BLOCK_LOCAL); if (mips_cm_is64)
write_gcr_co_reset64_base(core_entry_reg); else
write_gcr_co_reset_base(core_entry_reg);
mips_cm_unlock_other();
}
if (!cpus_are_siblings(cpu, smp_processor_id())) { /* Boot a VPE on another powered up core */ for (remote = 0; remote < NR_CPUS; remote++) { if (!cpus_are_siblings(cpu, remote)) continue; if (cpu_online(remote)) break;
} if (remote >= NR_CPUS) {
pr_crit("No online CPU in core %u to start CPU%d\n",
core, cpu); goto out;
}
err = smp_call_function_single(remote, remote_vpe_boot,
NULL, 1); if (err)
panic("Failed to call remote CPU\n"); goto out;
}
BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);
/* Boot a VPE on this core */
mips_cps_boot_vpes(core_cfg, vpe_id);
out:
preempt_enable(); return 0;
}
staticvoid cps_init_secondary(void)
{ int core = cpu_core(¤t_cpu_data);
/* Disable MT - we only want to run 1 TC per VPE */ if (cpu_has_mipsmt)
dmt();
if (mips_cm_revision() >= CM_REV_CM3) { unsignedint ident = read_gic_vl_ident();
/* * Ensure that our calculation of the VP ID matches up with * what the GIC reports, otherwise we'll have configured * interrupts incorrectly.
*/
BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
}
if (core > 0 && !read_gcr_cl_coherence())
pr_warn("Core %u is not in coherent domain\n", core);
#ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu)
cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); #endif/* CONFIG_MIPS_MT_FPAFF */
cpu = smp_processor_id();
core = cpu_core(&cpu_data[cpu]);
if (death == CPU_DEATH_HALT) {
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
pr_debug("Halting core %d VP%d\n", core, vpe_id); if (cpu_has_mipsmt) { /* Halt this TC */
write_c0_tchalt(TCHALT_H);
instruction_hazard();
} elseif (cpu_has_vp) {
write_cpc_cl_vp_stop(1 << vpe_id);
/* Ensure that the VP_STOP register is written */
wmb();
}
} else { if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
pr_debug("Gating power to core %d\n", core); /* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
}
}
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
cpu_death = CPU_DEATH_POWER;
pr_debug("CPU%d going offline\n", cpu);
if (cpu_has_mipsmt || cpu_has_vp) { /* Look for another online VPE within the core */
for_each_online_cpu(cpu_death_sibling) { if (!cpus_are_siblings(cpu, cpu_death_sibling)) continue;
/* * There is an online VPE within the core. Just halt * this TC and leave the core alone.
*/
cpu_death = CPU_DEATH_HALT; break;
}
}
cpuhp_ap_report_dead();
cps_shutdown_this_cpu(cpu_death);
/* This should never be reached */
panic("Failed to offline CPU %u", cpu);
}
/* * Now wait for the CPU to actually offline. Without doing this that * offlining may race with one or more of: * * - Onlining the CPU again. * - Powering down the core if another VPE within it is offlined. * - A sibling VPE entering a non-coherent state. * * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing * with which we could race, so do nothing.
*/ if (cpu_death == CPU_DEATH_POWER) { /* * Wait for the core to enter a powered down or clock gated * state, the latter happening when a JTAG probe is connected * in which case the CPC will refuse to power down the core.
*/
fail_time = ktime_add_ms(ktime_get(), 2000); do {
mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
stat &= CPC_Cx_STAT_CONF_SEQSTATE;
stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
mips_cpc_unlock_other();
mips_cm_unlock_other();
if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
stat == CPC_Cx_STAT_CONF_SEQSTATE_U2) break;
/* * The core ought to have powered down, but didn't & * now we don't really know what state it's in. It's * likely that its _pwr_up pin has been wired to logic * 1 & it powered back up as soon as we powered it * down... * * The best we can do is warn the user & continue in * the hope that the core is doing nothing harmful & * might behave properly if we online it later.
*/ if (WARN(ktime_after(ktime_get(), fail_time), "CPU%u hasn't powered down, seq. state %u\n",
cpu, stat)) break;
} while (1);
/* Indicate the core is powered off */
bitmap_clear(cluster_cfg->core_power, core, 1);
} elseif (cpu_has_mipsmt) { /* * Have a CPU with access to the offlined CPUs registers wait * for its TC to halt.
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
(void *)(unsignedlong)cpu, 1); if (err)
panic("Failed to call remote sibling CPU\n");
} elseif (cpu_has_vp) { do {
mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
stat = read_cpc_co_vp_running();
mips_cm_unlock_other();
} while (stat & (1 << vpe_id));
}
}
int register_cps_smp_ops(void)
{ if (!mips_cm_present()) {
pr_warn("MIPS CPS SMP unable to proceed without a CM\n"); return -ENODEV;
}
/* check we have a GIC - we need one for IPIs */ if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
pr_warn("MIPS CPS SMP unable to proceed without a GIC\n"); return -ENODEV;
}
register_smp_ops(&cps_smp_ops); return 0;
}
Messung V0.5
- Dauer der Verarbeitung: 0.15 Sekunden
(vorverarbeitet)
-
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.