/*
 * mips_io_port_base is the begin of the address space to which x86 style
 * I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
/* it returns the next free pfn after initrd */ staticunsignedlong __init init_initrd(void)
{ unsignedlong end;
/* * Board specific code or command line parser should have * already set up initrd_start and initrd_end. In these cases * perform sanity checks and use them if all looks good.
*/ if (!initrd_start || initrd_end <= initrd_start) goto disable;
/* Reject a start address that is not page aligned. */
if (initrd_start & ~PAGE_MASK) {
pr_err("initrd start must be page aligned\n"); goto disable;
}
/* * Sanitize initrd addresses. For example firmware * can't guess if they need to pass them through * 64-bits values if the kernel has been built in pure * 32-bit. We need also to switch from KSEG0 to XKPHYS * addresses now, so the code can now safely use __pa().
*/
end = __pa(initrd_end);
initrd_end = (unsignedlong)__va(end);
initrd_start = (unsignedlong)__va(__pa(initrd_start));
/* NOTE(review): the "disable:" error path targeted by the gotos above and
 * this function's return statement are not visible in this chunk — the
 * definition appears truncated here. Tokens such as "staticunsignedlong"
 * also look like whitespace-mangled extraction damage; confirm against
 * the original arch/mips/kernel/setup.c. */
/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped. Try to detect this and
 * byte swap it if needed.
 *
 * Only active on Cavium Octeon configurations; a no-op everywhere else.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 header;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	header = swab64p((u64 *)initrd_start);
	if (!memcmp(&header, "070701", 6) ||
	    decompress_method((unsigned char *)(&header), 8, NULL)) {
		unsigned long addr;

		pr_info("Byteswapped initrd detected\n");
		/* Swap the whole image in 8-byte units up to the aligned end. */
		for (addr = initrd_start; addr < ALIGN(initrd_end, 8); addr += 8)
			swab64s((u64 *)addr);
	}
#endif
}
/* NOTE(review): this looks like the interior of an initrd finalization
 * helper — the enclosing function header, the declaration of "size" and
 * the "disable:" label targeted below are not visible in this chunk. */
if (size == 0) {
printk(KERN_INFO "Initrd not found or empty"); goto disable;
} if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
/* The initrd end must lie within low memory (below max_low_pfn). */
printk(KERN_ERR "Initrd extends beyond end of memory"); goto disable;
}
/* NOTE(review): fragment of a bootmem initialization routine — the
 * function signature is not visible, and the preprocessor conditionals
 * opened below have no matching #endif in this chunk. The joined tokens
 * ("#ifdefined") look like extraction damage; confirm against the
 * original arch/mips/kernel/setup.c. */
/* * Initialize the bootmem allocator. It also setup initrd related data * if needed.
*/ #ifdefined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
/* * Sanity check any INITRD first. We don't take it into account * for bootmem setup initially, rely on the end-of-kernel-code * as our memory range starting point. Once bootmem is inited we * will reserve the area used for the initrd.
*/
init_initrd();
/* max_low_pfn is not a number of pages but the end pfn of low mem */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
ARCH_PFN_OFFSET = PFN_UP(ramstart); #else /* * Reserve any memory between the start of RAM and PHYS_OFFSET
*/ if (ramstart > PHYS_OFFSET)
memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
min_low_pfn = ARCH_PFN_OFFSET;
max_pfn = PFN_DOWN(ramend);
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { /* * Skip highmem here so we get an accurate max_low_pfn if low * memory stops short of high memory. * If the region overlaps HIGHMEM_START, end is clipped so * max_pfn excludes the highmem portion.
*/ if (start >= PFN_DOWN(HIGHMEM_START)) continue; if (end > PFN_DOWN(HIGHMEM_START))
end = PFN_DOWN(HIGHMEM_START); if (end > max_low_pfn)
max_low_pfn = end;
}
/* No usable low memory at all is unrecoverable this early in boot. */
if (min_low_pfn >= max_low_pfn)
panic("Incorrect memory mapping !!!");
/* NOTE(review): fragment of a vmcore/kdump reservation helper — the
 * enclosing function header and the declarations of "i", "start" and
 * "end" are not visible in this chunk. */
if (!elfcorehdr_size) {
/* If no explicit size was given, derive it: find the memory region
 * containing the elf core header and take everything from the header
 * to the end of that region. */
for_each_mem_range(i, &start, &end) { if (elfcorehdr_addr >= start && elfcorehdr_addr < end) { /* * Reserve from the elf core header to the end of * the memory segment, that should all be kdump * reserved memory.
*/
elfcorehdr_size = end - elfcorehdr_addr; break;
}
}
}
pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
(unsignedlong)elfcorehdr_size >> 10, (unsignedlong)elfcorehdr_addr >> 10);
/*
 * Insert the crashkernel reservation (crashk_res) as a child of the
 * given parent resource. No-op when crash reservation support is not
 * built in or when no crashkernel region was reserved (empty resource).
 */
static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	/* start == end means no crashkernel region was set up. */
	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)(resource_size(&crashk_res) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
/* NOTE(review): this looks like the body of a command-line assembly
 * routine (cf. bootcmdline_init) — the function signature is not visible
 * in this chunk, and the comment opened on the "#ifdef" line below is
 * mangled extraction formatting; confirm against the original file. */
/* * If CMDLINE_OVERRIDE is enabled then initializing the command line is * trivial - we simply use the built-in command line unconditionally & * unmodified.
*/ if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); return;
}
/* * If the user specified a built-in command line & * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is * prepended to arguments from the bootloader or DT so we'll copy them * to the start of boot_command_line here. Otherwise, empty * boot_command_line to undo anything early_init_dt_scan_chosen() did.
*/ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); else
boot_command_line[0] = 0;
#ifdef CONFIG_OF_EARLY_FLATTREE /* * If we're configured to take boot arguments from DT, look for those * now.
*/ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs); #endif
/* * If we didn't get any arguments from DT (regardless of whether that's * because we weren't configured to look for them, or because we looked * & found none) then we'll take arguments from the bootloader. * plat_mem_setup() should have filled arcs_cmdline with arguments from * the bootloader.
*/ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
/* * If the user specified a built-in command line & we didn't already * prepend it, we append it to boot_command_line here.
*/ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
!IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}
/* * arch_mem_init - initialize memory management subsystem * * o plat_mem_setup() detects the memory configuration and will record detected * memory areas using memblock_add. * * At this stage the memory configuration of the system is known to the * kernel but generic memory management system is still entirely uninitialized. * * o bootmem_init() * o sparse_init() * o paging_init() * o dma_contiguous_reserve() * * At this stage the bootmem allocator is ready to use. * * NOTE: historically plat_mem_setup did the entire platform initialization. * This was rather impractical because it meant plat_mem_setup had to * get away without any kind of memory allocator. To keep old code from * breaking plat_setup was just renamed to plat_mem_setup and a second platform * initialization hook for anything else was introduced.
*/ staticvoid __init arch_mem_init(char **cmdline_p)
{ /* call board setup routine */
plat_mem_setup();
memblock_set_bottom_up(true);
/* * Prevent memblock from allocating high memory. * This cannot be done before max_low_pfn is detected, so up * to this point is possible to only reserve physical memory * with memblock_reserve; memblock_alloc* can be used * only after this point
*/
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
mips_reserve_vmcore();
mips_parse_crashkernel();
device_tree_init();
/* * In order to reduce the possibility of kernel panic when failed to * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate * low memory as small as possible before plat_swiotlb_setup(), so * make sparse_init() using top-down allocation.
*/
memblock_set_bottom_up(false);
sparse_init();
memblock_set_bottom_up(true);
plat_swiotlb_setup();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
/* Reserve for hibernation. */
memblock_reserve(__pa_symbol(&__nosave_begin),
__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
/* NOTE(review): from here down the code uses "res", "start" and "end",
 * none of which are declared in this function, and ends with an extra
 * closing brace — this looks like the interior of a separate
 * resource-registration loop (cf. resource_init) spliced in by
 * extraction damage. Confirm against the original setup.c. */
res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES);
res->start = start; /* * In memblock, end points to the first byte after the * range while in resourses, end points to the last byte in * the range.
*/
res->end = end - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
res->name = "System RAM";
request_resource(&iomem_resource, res);
/* * We don't know which RAM region contains kernel data, * so we try it repeatedly and let the resource manager * test it.
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
request_crashkernel(res);
}
}
/* NOTE(review): SMP-only helper that marks CPUs 0..possible-1 as
 * possible and the remainder (up to NR_CPUS) as not possible. The
 * closing brace and the #endif matching the "#ifdef CONFIG_SMP" below
 * are not visible in this chunk — the definition appears truncated. */
#ifdef CONFIG_SMP staticvoid __init prefill_possible_map(void)
{ int i, possible = num_possible_cpus();
/* Clamp to the runtime limit on CPU ids. */
if (possible > nr_cpu_ids)
possible = nr_cpu_ids;
for (i = 0; i < possible; i++)
set_cpu_possible(i, true); for (; i < NR_CPUS; i++)
set_cpu_possible(i, false);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.