// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */
/* The main machine-dep calls structure */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
/* Pointer to the matched machine description (see probe_machine()). */
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

/* Logical id of the boot cpu; -1 until it is discovered during early boot. */
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);

int __initdata boot_core_hwid = -1;

#ifdef CONFIG_PPC64
int boot_cpu_hwid = -1;
#endif

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

#ifdef CONFIG_CRASH_DUMP
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif
/* also used by kexec */
void machine_shutdown(void)
{
	/*
	 * if fadump is active, cleanup the fadump registration before we
	 * shutdown.
	 */
	fadump_cleanup();

	/* Give the platform a chance to perform its own shutdown actions. */
	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}
/*
 * Final resting place when a restart or power-off request did not take
 * effect: announce the halt, mask interrupts and spin forever.
 *
 * Defect fixed: the extraction artifact "staticvoid" (missing space)
 * made this declaration a compile error.
 */
static void machine_hang(void)
{
	pr_emerg("System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1)
		;
}
/*
 * Reboot the machine: run the common shutdown path, then try the
 * platform restart hook followed by the generic kernel restart chain.
 * Falls through to machine_hang() if nothing actually resets the box.
 */
void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);

	smp_send_stop();

	do_kernel_restart(cmd);
	/* Give the restart a moment to take effect before giving up. */
	mdelay(1000);

	machine_hang();
}
/*
 * Power the machine off: common shutdown, then the generic kernel
 * power-off chain. Falls through to machine_hang() if the hardware
 * did not actually power down.
 */
void machine_power_off(void)
{
	machine_shutdown();
	do_kernel_power_off();
	smp_send_stop();
	machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
seq_puts(m, ", altivec supported");
seq_putc(m, '\n');
#ifdef CONFIG_TAU if (cpu_has_feature(CPU_FTR_TAU)) { if (IS_ENABLED(CONFIG_TAU_AVERAGE)) { /* more straightforward, but potentially misleading */
seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
cpu_temp(cpu_id));
} else { /* show the actual temp sensor range */
u32 temp;
temp = cpu_temp_both(cpu_id);
seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
temp & 0xff, temp >> 16);
}
} #endif/* CONFIG_TAU */
/* * Platforms that have variable clock rates, should implement * the method ppc_md.get_proc_freq() that reports the clock * rate of a given cpu. The rest can use ppc_proc_freq to * report the clock rate that is same across all cpus.
*/ if (ppc_md.get_proc_freq)
proc_freq = ppc_md.get_proc_freq(cpu_id); else
proc_freq = ppc_proc_freq;
/* If we are a Freescale core do a simple check so
* we don't have to keep adding cases in the future */ if (PVR_VER(pvr) & 0x8000) { switch (PVR_VER(pvr)) { case 0x8000: /* 7441/7450/7451, Voyager */ case 0x8001: /* 7445/7455, Apollo 6 */ case 0x8002: /* 7447/7457, Apollo 7 */ case 0x8003: /* 7447A, Apollo 7 PM */ case 0x8004: /* 7448, Apollo 8 */ case 0x800c: /* 7410, Nitro */
maj = ((pvr >> 8) & 0xF);
min = PVR_MIN(pvr); break; default: /* e500/book-e */
maj = PVR_MAJ(pvr);
min = PVR_MIN(pvr); break;
}
} else { switch (PVR_VER(pvr)) { case 0x1008: /* 740P/750P ?? */
maj = ((pvr >> 8) & 0xFF) - 1;
min = pvr & 0xFF; break; case 0x004e: /* POWER9 bits 12-15 give chip type */ case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
maj = (pvr >> 8) & 0x0F;
min = pvr & 0xFF; break; default:
maj = (pvr >> 8) & 0xFF;
min = pvr & 0xFF; break;
}
}
/* If this is the last cpu, print the summary */ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
show_cpuinfo_summary(m);
return 0;
}
/*
 * seq_file iterator start: advance *pos to the next online cpu and
 * return its 1-based id as an opaque cookie, or NULL when iteration
 * is complete.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	loff_t cpu;

	/* just in case, cpu 0 is not the first */
	cpu = (*pos == 0) ? cpumask_first(cpu_online_mask)
			  : cpumask_next(*pos - 1, cpu_online_mask);
	*pos = cpu;

	return (cpu < nr_cpu_ids) ? (void *)(unsigned long)(cpu + 1) : NULL;
}
/* If we were passed an initrd, set the ROOT_DEV properly if the values * look sensible. If not, clear initrd reference.
*/ if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
initrd_end > initrd_start)
ROOT_DEV = Root_RAM0; else
initrd_start = initrd_end = 0;
if (initrd_start)
pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
/* This implementation only supports power of 2 number of threads * for simplicity and performance
*/
threads_shift = ilog2(tpc);
BUG_ON(tpc != (1 << threads_shift));
for (i = 0; i < tpc; i++)
cpumask_set_cpu(i, &threads_core_mask);
printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
tpc, str_plural(tpc));
printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}
/* Per-logical-cpu table of hardware cpu ids; presumably allocated during
 * cpu-map setup — NULL until then. */
u32 *cpu_to_phys_id = NULL;
staticint assign_threads(unsignedint cpu, unsignedint nthreads, bool present, const __be32 *hw_ids)
{ for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {
__be32 hwid;
hwid = be32_to_cpu(hw_ids[i]);
DBG(" thread %d -> cpu %d (hard id %d)\n", i, cpu, hwid);
/** * setup_cpu_maps - initialize the following cpu maps: * cpu_possible_mask * cpu_present_mask * * Having the possible map set up early allows us to restrict allocations * of things like irqstacks to nr_cpu_ids rather than NR_CPUS. * * We do not initialize the online map here; cpus set their own bits in * cpu_online_mask as they come up. * * This function is valid only for Open Firmware systems. finish_device_tree * must be called before using this. * * While we're here, we may as well set the "physical" cpu ids in the paca. * * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
*/ void __init smp_setup_cpu_maps(void)
{ struct device_node *dn; int cpu = 0; int nthreads = 1;
if (cpu < nr_cpu_ids)
cpu = assign_threads(cpu, nthreads, avail, intserv);
}
/* If no SMT supported, nthreads is forced to 1 */ if (!cpu_has_feature(CPU_FTR_SMT)) {
DBG(" SMT disabled ! nthreads forced to 1\n");
nthreads = 1;
}
#ifdef CONFIG_PPC64 /* * On pSeries LPAR, we need to know how many cpus * could possibly be added to this partition.
*/ if (firmware_has_feature(FW_FEATURE_LPAR) &&
(dn = of_find_node_by_path("/rtas"))) { int num_addr_cell, num_size_cell, maxcpus; const __be32 *ireg;
/* Initialize CPU <=> thread mapping/ * * WARNING: We assume that the number of threads is the same for * every CPU in the system. If that is not the case, then some code * here will have to be reworked
*/
cpu_init_thread_core_maps(nthreads);
/* Now that possible cpus are set, set nr_cpu_ids for later use */
setup_nr_cpu_ids();
free_unused_pacas();
} #endif/* CONFIG_SMP */
#ifdef CONFIG_PCSPKR_PLATFORM static __init int add_pcspkr(void)
{ struct device_node *np; struct platform_device *pd; int ret;
np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
of_node_put(np); if (!np) return -ENODEV;
pd = platform_device_alloc("pcspkr", -1); if (!pd) return -ENOMEM;
ret = platform_device_add(pd); if (ret)
platform_device_put(pd);
/* * Iterate all ppc_md structures until we find the proper * one for the current machine type
*/
DBG("Probing machine type ...\n");
/* * Check ppc_md is empty, if not we have a bug, ie, we setup an * entry before probe_machine() which will be overwritten
*/ for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) { if (((void **)&ppc_md)[i]) {
printk(KERN_ERR "Entry %d in ppc_md non empty before" " machine probe !\n", i);
}
}
for (machine_id = &__machine_desc_start;
machine_id < &__machine_desc_end;
machine_id++) {
DBG(" %s ...\n", machine_id->name); if (machine_id->compatible && !of_machine_is_compatible(machine_id->compatible)) continue; if (machine_id->compatibles && !of_machine_compatible_match(machine_id->compatibles)) continue;
memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls)); if (ppc_md.probe && !ppc_md.probe()) continue;
DBG(" %s match !\n", machine_id->name); break;
} /* What can we do if we didn't find ? */ if (machine_id >= &__machine_desc_end) {
pr_err("No suitable machine description found !\n"); for (;;);
}
// Append the machine name to other info we've gathered
seq_buf_puts(&ppc_hw_desc, ppc_md.name);
// Set the generic hardware description shown in oopses
dump_stack_set_arch_desc(ppc_hw_desc.buffer);
/* Match a class of boards, not a specific device configuration. */
/*
 * Check the device tree for a legacy ISA device at @base_port.
 * Returns 0 if a matching node with an "isa" parent exists, -ENODEV
 * otherwise. As a side effect, resolves the i8042 keyboard/aux irqs
 * (falling back to the traditional PC values 1 and 12).
 */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	switch (base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);

			of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
			if (!of_i8042_kbd_irq)
				of_i8042_kbd_irq = 1;

			of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
			if (!of_i8042_aux_irq)
				of_i8042_aux_irq = 12;

			/* Drop the child; keep the parent for the isa check. */
			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		if (np) {
			of_i8042_kbd_irq = 1;
			of_i8042_aux_irq = 12;
		}
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	parent = of_get_parent(np);
	if (parent) {
		if (of_node_is_type(parent, "isa"))
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);
/* * Panic notifiers setup * * We have 3 notifiers for powerpc, each one from a different "nature": * * - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables * IRQs and deal with the Firmware-Assisted dump, when it is configured; * should run early in the panic path. * * - dump_kernel_offset() is an informative notifier, just showing the KASLR * offset if we have RANDOMIZE_BASE set. * * - ppc_panic_platform_handler() is a low-level handler that's registered * only if the platform wishes to perform final actions in the panic path, * hence it should run late and might not even return. Currently, only * pseries and ps3 platforms register callbacks.
*/ staticint ppc_panic_fadump_handler(struct notifier_block *this, unsignedlong event, void *ptr)
{ /* * panic does a local_irq_disable, but we really * want interrupts to be hard disabled.
*/
hard_irq_disable();
/* * If firmware-assisted dump has been registered then trigger * its callback and let the firmware handles everything else.
*/
crash_fadump(NULL, ptr);
static int ppc_panic_platform_handler(struct notifier_block *this,
				      unsigned long event, void *ptr)
{
	/*
	 * This handler is only registered if we have a panic callback
	 * on ppc_md, hence NULL check is not needed.
	 * Also, it may not return, so it runs really late on panic path.
	 */
	ppc_md.panic(ptr);
	return NOTIFY_DONE;
}
/* Panic notifier for firmware-assisted dump handling. */
static struct notifier_block ppc_fadump_block = {
	.notifier_call = ppc_panic_fadump_handler,
	.priority = INT_MAX, /* run early, to notify the firmware ASAP */
};

/* Panic notifier for the platform's ppc_md.panic() callback. */
static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_platform_handler,
	.priority = INT_MIN, /* may not return; must be done last */
};
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_offset_notifier);
/* Low-level platform-specific routines that should run on panic */ if (ppc_md.panic)
atomic_notifier_chain_register(&panic_notifier_list,
&ppc_panic_block);
}
#ifdef CONFIG_CHECK_CACHE_COHERENCY /* * For platforms that have configurable cache-coherency. This function * checks that the cache coherency setting of the kernel matches the setting * left by the firmware, as indicated in the device tree. Since a mismatch * will eventually result in DMA failures, we print * and error and call * BUG() in that case.
*/
/*
 * Called into from start_kernel this initializes memblock, which is used
 * to manage page allocation until mem_init is called.
 *
 * NOTE(review): the sequence below is strictly order-dependent (device
 * tree must be unflattened before cache info / machine probing; cpu maps
 * before SMT checks; initmem before the CMA reservations). Do not reorder.
 */
void __init setup_arch(char **cmdline_p)
{
	kasan_init();

	*cmdline_p = boot_command_line;

	/* Set a half-reasonable default so udelay does something sensible */
	loops_per_jiffy = 500000000 / HZ;

	/* Unflatten the device-tree passed by prom_init or kexec */
	unflatten_device_tree();

	/*
	 * Initialize cache line/block info from device-tree (on ppc64) or
	 * just cputable (on ppc32).
	 */
	initialize_cache_info();

	/* Initialize RTAS if available. */
	rtas_initialize();

	/* Check if we have an initrd provided via the device-tree. */
	check_for_initrd();

	/* Probe the machine type, establish ppc_md. */
	probe_machine();

	/* Setup panic notifier if requested by the platform. */
	setup_panic();

	/*
	 * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
	 * it from their respective probe() function.
	 */
	setup_power_save();

	/* Discover standard serial ports. */
	find_legacy_serial_ports();

	/* Register early console with the printk subsystem. */
	register_early_udbg_console();

	/* Setup the various CPU maps based on the device-tree. */
	smp_setup_cpu_maps();

	/* Initialize xmon. */
	xmon_setup();

	/* Check the SMT related command line arguments (ppc64). */
	check_smt_enabled();

	/*
	 * Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids.
	 *
	 * Freescale Book3e parts spin in a loop provided by firmware,
	 * so smp_release_cpus() does nothing for them.
	 */
#ifdef CONFIG_SMP
	smp_setup_pacas();

	/* On BookE, setup per-core TLB data structures. */
	setup_tlb_core_data();
#endif

	/* Print various info about the machine that has been gathered so far. */
	print_system_info();

	klp_init_thread_info(&init_task);

	setup_initial_init_mm(_stext, _etext, _edata, _end);
	/* sched_init() does the mmgrab(&init_mm) for the primary CPU */
	VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(&init_mm));
	inc_mm_active_cpus(&init_mm);
	mm_iommu_init(&init_mm);

	/*
	 * Reserve large chunks of memory for use by CMA for fadump, KVM and
	 * hugetlb. These must be called after initmem_init(), so that
	 * pageblock_order is initialised.
	 */
	fadump_cma_init();
	kvm_cma_reserve();
	gigantic_hugetlb_cma_reserve();

	/* Initialize the MMU context management stuff. */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned. */
	if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.17 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.