if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
pr_err("%s: could not get reset control for core %d\n",
__func__, pd); return PTR_ERR(rstc);
}
/* * We need to soft reset the cpu when we turn off the cpu power domain, * or else the active processors might be stalled when the individual * processor is powered down.
*/ if (!IS_ERR(rstc) && !on)
reset_control_assert(rstc);
if (has_pmu) {
ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val); if (ret < 0) {
pr_err("%s: could not update power domain\n",
__func__); return ret;
}
ret = -1; while (ret != on) {
ret = pmu_power_domain_is_on(pd); if (ret < 0) {
pr_err("%s: could not read power domain state\n",
__func__); return ret;
}
}
}
if (!IS_ERR(rstc)) { if (on)
reset_control_deassert(rstc);
reset_control_put(rstc);
}
return 0;
}
/*
 * Handling of CPU cores
 */
/*
 * rockchip_boot_secondary - power up and release a secondary cpu core.
 *
 * NOTE(review): "staticint" and "unsignedint" below look like whitespace-
 * mangled "static int" / "unsigned int" — confirm against the upstream
 * file. The function also appears truncated at the end of this chunk: the
 * branch opened below is never closed here, and the mailbox writes the
 * comment describes are missing.
 */
staticint rockchip_boot_secondary(unsignedint cpu, struct task_struct *idle)
{ int ret;
/* Refuse to boot when the boot sram (or the pmu, where required) is absent. */
if (!sram_base_addr || (has_pmu && !pmu)) {
pr_err("%s: sram or pmu missing for cpu boot\n", __func__); return -ENXIO;
}
/* Reject cpu ids beyond the known core count. */
if (cpu >= ncores) {
pr_err("%s: cpu %d outside maximum number of cpus %d\n",
__func__, cpu, ncores); return -ENXIO;
}
/* start the core */
ret = pmu_set_power_domain(0 + cpu, true); if (ret < 0) return ret;
/*
 * We communicate with the bootrom to activate the cpus other
 * than cpu0; after a blob of initialization code they will
 * stay in wfe state. Once they are activated, they will check
 * the mailbox:
 *   sram_base_addr + 4: 0xdeadbeaf
 *   sram_base_addr + 8: start address for pc
 * Cpu0 needs to wait for the other cpus to enter the wfe state.
 * The wait time is affected by many aspects
 * (e.g. cpu frequency, bootrom frequency, sram frequency, ...).
 */
if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
mdelay(1); /* ensure the cpus other than cpu0 have started up */
/**
 * rockchip_smp_prepare_sram - populate necessary sram block
 * Starting cores execute the code residing at the start of the on-chip sram
 * after power-on. Therefore make sure, this sram region is reserved and
 * big enough. After this check, copy the trampoline code that directs the
 * core to the real startup code in ram into the sram-region.
 * @node: mmio-sram device node
 *
 * NOTE(review): "staticint"/"unsignedint" below look like whitespace-mangled
 * "static int"/"unsigned int"; confirm against the upstream file.
 */
staticint __init rockchip_smp_prepare_sram(struct device_node *node)
{ unsignedint trampoline_sz = &rockchip_secondary_trampoline_end -
&rockchip_secondary_trampoline; struct resource res; unsignedint rsize; int ret;
/* Resolve the sram node's mmio region. */
ret = of_address_to_resource(node, 0, &res); if (ret < 0) {
pr_err("%s: could not get address for node %pOF\n",
__func__, node); return ret;
}
/* The reserved sram region must be large enough to hold the trampoline. */
rsize = resource_size(&res); if (rsize < trampoline_sz) {
pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n",
__func__, rsize, trampoline_sz); return -EINVAL;
}
/* set the boot function for the sram code */
rockchip_boot_fn = __pa_symbol(secondary_startup);
/* copy the trampoline to sram, that runs during startup of the core */
memcpy_toio(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
/* Make the trampoline visible to the freshly powered-on core. */
flush_cache_all();
outer_clean_range(0, trampoline_sz);
/*
 * NOTE(review): the function appears truncated here — the expected
 * "return 0; }" tail is missing from this chunk.
 */
/*
 * NOTE(review): this region looks like the interior of a pmu-lookup helper —
 * its "static int ..." header and closing brace are missing from this chunk.
 *
 * This function is only called via smp_ops->smp_prepare_cpu().
 * That only happens if a "/cpus" device tree node exists
 * and has an "enable-method" property that selects the SMP
 * operations defined herein.
 */
node = of_find_node_by_path("/cpus");
/* Preferred: resolve the pmu syscon through the "rockchip,pmu" phandle. */
pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
of_node_put(node); if (!IS_ERR(pmu)) return 0;
/* Next best: find the pmu syscon by its compatible string. */
pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3066-pmu"); if (!IS_ERR(pmu)) return 0;
/* fallback, create our own regmap for the pmu area */
pmu = NULL;
node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu"); if (!node) {
pr_err("%s: could not find pmu dt node\n", __func__); return -ENODEV;
}
pmu_base = of_iomap(node, 0);
of_node_put(node); if (!pmu_base) {
pr_err("%s: could not map pmu registers\n", __func__); return -ENOMEM;
}
/* NOTE(review): the line below is cut off mid-statement in this chunk. */
pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config); if (IS_ERR(pmu)) { int ret = PTR_ERR(pmu);
/*
 * NOTE(review): fragment — the enclosing function header is missing from
 * this chunk; the bare "return;" below implies a void function.
 */
scu_base_addr = of_iomap(node, 0); if (!scu_base_addr) {
pr_err("%s: could not map scu registers\n", __func__);
of_node_put(node); return;
}
/*
 * While the number of cpus is gathered from dt, also get the
 * number of cores from the scu to verify this value when
 * booting the cores.
 */
ncores = scu_get_core_count(scu_base_addr);
/* NOTE(review): pr_err for an informational message — pr_info looks intended; confirm. */
pr_err("%s: ncores %d\n", __func__, ncores);
#ifdef CONFIG_HOTPLUG_CPU
/*
 * rockchip_cpu_kill - cut power to a cpu that is going offline
 * @cpu: index of the core being taken down
 *
 * Returns 1 to tell the hotplug core that the cpu was killed.
 */
static int rockchip_cpu_kill(unsigned int cpu)
{
	/*
	 * Give the dying cpu time to finish executing v7_coherency_exit()
	 * and reach the WFI/WFE state before its power domain is disabled.
	 */
	mdelay(1);

	pmu_set_power_domain(0 + cpu, false);
	return 1;
}
staticvoid rockchip_cpu_die(unsignedint cpu)
{
v7_exit_coherency_flush(louis); while (1)
cpu_do_idle();
} #endif
/*
 * NOTE(review): the lines below are stray German website boilerplate (a
 * no-warranty disclaimer plus a remark that the syntax highlighting and
 * measurement are still experimental) that was pasted into this source
 * file by accident. They are preserved verbatim but commented out so they
 * cannot be parsed as code; they should simply be removed.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */