/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
char __amode31_ref *__samode31 = _samode31;
char __amode31_ref *__eamode31 = _eamode31;
char __amode31_ref *__stext_amode31 = _stext_amode31;
char __amode31_ref *__etext_amode31 = _etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2G which is handled by the AMODE31
 * sections.
 * Because the AMODE31 sections are relocated below 2G at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with new addresses after the relocation. The initial initialization of
 * control registers occurs in head64.S and then gets updated again after AMODE31
 * relocation. We must access the relevant AMODE31 tables indirectly via
 * pointers placed in the .amode31.refs linker section. Those pointers get
 * updated automatically during AMODE31 relocation and always contain a valid
 * address within AMODE31 sections.
 */
/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through which is safe.
 */
unsigned long mio_wb_bit_mask __ro_after_init;
/*
 * This is set up by the setup-routine at boot-time
 * for S390 need to find out, what we have to setup
 * using address 0x10400 ...
 */
#include <asm/setup.h>
/*
 * condev= and conmode= setup parameter.
 */
static int __init condev_setup(char *str)
{
	int vdev;
	char *ptr;

	/*
	 * NOTE(review): this body was reconstructed from a garbled source.
	 * It reads like console-mode autodetection (conmode_default) rather
	 * than a condev= parser: 'str' and 'vdev' are unused here, and the
	 * original text used 'ptr' without declaring it. Confirm against the
	 * upstream arch/s390/kernel/setup.c before relying on this logic.
	 */
	if (machine_is_vm()) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			/* __setup() handlers return 1 when the option was consumed */
			return 1;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (machine_is_kvm()) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
	return 1;
}
/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */
void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}
void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}
void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}
/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);
res->name = "System RAM";
res->start = start; /* * In memblock, end points to the first byte after the * range while in resources, end points to the last byte in * the range.
*/
res->end = end - 1;
request_resource(&iomem_resource, res);
for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
std_res = standard_resources[j]; if (std_res->start < res->start ||
std_res->start > res->end) continue; if (std_res->end > res->end) {
sub_res = memblock_alloc_or_panic(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
request_resource(res, sub_res);
} else {
request_resource(res, std_res);
}
}
} #ifdef CONFIG_CRASH_DUMP /* * Re-add removed crash kernel memory as reserved memory. This makes * sure it will be mapped with the identity mapping and struct pages * will be created, so it can be resized later on. * However add it later since the crash kernel resource should not be * part of the System RAM resource.
*/ if (crashk_res.end) {
memblock_add_node(crashk_res.start, resource_size(&crashk_res),
0, MEMBLOCK_NONE);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
} #endif
}
/*
 * Record the end of usable memory: both max_pfn and max_low_pfn are
 * derived from the identity-mapping size established at boot.
 */
static void __init setup_memory_end(void)
{
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
#ifdef CONFIG_CRASH_DUMP
/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct memory_notify *arg = data;

	/* Only offlining attempts are of interest */
	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	/* Veto offlining of any page frame below the crashkernel size */
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}
if (memblock.memory.regions[0].size < crash_size) {
pr_info("crashkernel reservation failed: %s\n", "first memory chunk must be at least crashkernel size"); return;
}
low = crash_base ?: oldmem_data.start;
high = low + crash_size; if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) { /* The crashkernel fits into OLDMEM, reuse OLDMEM */
crash_base = low;
} else { /* Find suitable area in free memory */
low = max_t(unsignedlong, crash_size, sclp.hsa_size);
high = crash_base ? crash_base + crash_size : ULONG_MAX;
/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}
/* Move original AMODE31 section to the new one */
memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size); /* Zero out the old AMODE31 section to catch invalid accesses within it */
memset(__samode31, 0, amode31_size);
/* Update all AMODE31 region references */ for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
*ptr += amode31_offset;
}
/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	/*
	 * NOTE(review): the source for this function is garbled - the control
	 * register locals above are never used, and the statements below look
	 * like they belong to the arch-random (PRNO/TRNG) setup instead of
	 * control register initialization. Confirm against the upstream
	 * arch/s390/kernel/setup.c before trusting this body.
	 */
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		static_branch_enable(&s390_arch_random_available);
}
/* * Issue diagnose 318 to set the control program name and * version codes.
*/ staticvoid __init setup_control_program_code(void)
{ union diag318_info diag318_info = {
.cpnc = CPNC_LINUX,
.cpvc = 0,
};
/* * Setup function called from init/main.c just after the banner * was printed.
*/
void __init setup_arch(char **cmdline_p)
{ /* * print what head.S has found out about the machine
*/ if (machine_is_vm())
pr_info("Linux is running as a z/VM " "guest operating system in 64-bit mode\n"); elseif (machine_is_kvm())
pr_info("Linux is running under KVM in 64-bit mode\n"); elseif (machine_is_lpar())
pr_info("Linux is running natively in 64-bit mode\n"); else
pr_info("Linux is running as a guest in 64-bit mode\n"); /* Print decompressor messages if not already printed */ if (!boot_earlyprintk)
boot_rb_foreach(print_rb_entry);
if (machine_has_relocated_lowcore())
pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
log_component_list();
/* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
/* Do some memory reservations *before* memory is added to memblock */
reserve_pgtables();
reserve_lowcore();
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
reserve_physmem_info();
memblock_set_current_limit(ident_map_size);
memblock_allow_resize();
/* Get information about *all* installed memory */
memblock_add_physmem_info();
relocate_amode31_section();
setup_cr();
setup_uv();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve(); if (cpu_has_edat2())
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
reserve_crashkernel(); #ifdef CONFIG_CRASH_DUMP /* * Be aware that smp_save_dump_secondary_cpus() triggers a system reset. * Therefore CPU and device initialization should be done afterwards.
*/
smp_save_dump_secondary_cpus(); #endif
/* * After paging_init created the kernel page table, the new PSWs * in lowcore can now run with DAT enabled.
*/ #ifdef CONFIG_CRASH_DUMP
smp_save_dump_ipl_cpu(); #endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.