// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
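
/*
 * For reference: struct mpidr_hash is declared in <asm/smp_plat.h>; it
 * carries the mask of MPIDR_EL1 bits that actually toggle across CPUs
 * (mask), the per-affinity-level shifts computed below (shift_aff[4])
 * and the total width of the resulting index (bits).
 */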
struct mpidr_hash mpidr_hash;

/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;

	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
pr_debug("mask of set bits %#llx\n", mask); /* * Find and stash the last and first bit set at all affinity levels to * check how many bits are required to represent them.
*/ for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space into a
	 * contiguous set of indices. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + fs[3] -
				  (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
mpidr_hash.shift_aff[0],
mpidr_hash.shift_aff[1],
mpidr_hash.shift_aff[2],
mpidr_hash.shift_aff[3],
mpidr_hash.mask,
mpidr_hash.bits); /* * 4x is an arbitrary value used to warn on a hash table much bigger * than expected on most systems.
*/ if (mpidr_hash_size() > 4 * num_possible_cpus())
pr_warn("Large number of MPIDR hash buckets detected\n");
}
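
/*
 * Illustrative sketch, not part of the original file: one way the shifts
 * computed above could be applied in C to turn an MPIDR value into its
 * linear index (the in-tree consumers do the equivalent computation in
 * assembly). The helper name mpidr_to_hash_index() is hypothetical.
 * mpidr_hash_size(), used in the warning above, comes from
 * <asm/smp_plat.h> and evaluates to 1 << mpidr_hash.bits.
 */
static inline u32 mpidr_to_hash_index(u64 mpidr)
{
	u32 index = 0;
	int i;

	/* Drop the bits that never toggle across the possible CPUs. */
	mpidr &= mpidr_hash.mask;

	/*
	 * For each affinity level, isolate the field in place, then shift
	 * its significant bits down next to those of the lower levels.
	 */
	for (i = 0; i < 4; i++)
		index |= (mpidr & ((u64)MPIDR_LEVEL_MASK << MPIDR_LEVEL_SHIFT(i)))
				>> mpidr_hash.shift_aff[i];

	return index;
}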
	/*
	 * dt_virt is a fixmap address, hence __pa(dt_virt) can't be used.
	 * Pass dt_phys directly.
	 */
	if (!early_init_dt_scan(dt_virt, dt_phys)) {
pr_crit("\n" "Error: invalid device tree blob: PA=%pa, VA=%px, size=%d bytes\n" "The dtb must be 8-byte aligned and must not exceed 2 MB in size.\n" "\nPlease check your bootloader.\n",
&dt_phys, dt_virt, size);
		/*
		 * Note that at this _really_ early stage we cannot even BUG()
		 * or oops, so the least terrible thing to do is cpu_relax(),
		 * or else we could end up printing non-initialized data, etc.
		 */
		while (true)
			cpu_relax();
	}
	/* Early fixups are done, map the FDT as read-only now */
	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;
	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();
	dynamic_scs_init();
	/*
	 * The primary CPU enters the kernel with all DAIF exceptions masked.
	 *
	 * We must unmask Debug and SError before preemption or scheduling is
	 * possible to ensure that these are consistently unmasked across
	 * threads, and we want to unmask SError as soon as possible after
	 * initializing earlycon so that we can report any SErrors immediately.
	 *
	 * IRQ and FIQ will be unmasked after the root irqchip has been
	 * detected and initialized.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
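	/*
	 * For reference (assumption about the surrounding tree):
	 * DAIF_PROCCTX_NOIRQ in <asm/daifflags.h> keeps the I (IRQ) and
	 * F (FIQ) mask bits set while clearing D (Debug) and A (SError),
	 * matching the ordering described above.
	 */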
	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();
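	/*
	 * For reference (assumption about the surrounding tree):
	 * cpu_uninstall_idmap() in <asm/mmu_context.h> points TTBR0_EL1 at
	 * the all-invalid reserved_pg_dir and flushes the TLB, which is what
	 * implements the "zero page" behaviour described above.
	 */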
	xen_early_init();
	efi_init();
	if (!efi_enabled(EFI_BOOT)) {
		if ((u64)_text % MIN_KIMG_ALIGN)
			pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
		WARN_TAINT(mmu_enabled_at_boot, TAINT_FIRMWARE_WORKAROUND,
			   FW_BUG "Booted with MMU enabled!");
	}
	arm64_memblock_init();
	paging_init();
	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();
	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();
	kasan_init();
	request_standard_resources();
	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif
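	/*
	 * For reference: reserved_pg_dir contains no valid entries, so even
	 * if uaccess_enable() runs in the init thread, the resulting user
	 * accesses fault instead of walking a stale or unrelated table.
	 */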
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
		       "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
		       "This indicates a broken bootloader or old kernel\n",
		       boot_args[1], boot_args[2], boot_args[3]);
	}
}