// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
 * dump with assistance from firmware. This approach does not use kexec,
 * instead firmware assists in booting the kdump kernel while preserving
 * memory contents. Most of the code implementation has been adapted
 * from the phyp assisted dump implementation written by Linas Vepstas and
 * Manish Ahuja.
 *
 * Copyright 2011 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */
/* * The CPU who acquired the lock to trigger the fadump crash should * wait for other CPUs to enter. * * The timeout is in milliseconds.
*/ #define CRASH_TIMEOUT 500
/* * fadump_cma_init() - Initialize CMA area from a fadump reserved memory * * This function initializes CMA area from fadump reserved memory. * The total size of fadump reserved memory covers for boot memory size * + cpu data size + hpte size and metadata. * Initialize only the area equivalent to boot memory size for CMA use. * The remaining portion of fadump reserved memory will be not given * to CMA and pages for those will stay reserved. boot memory size is * aligned per CMA requirement to satisy cma_init_reserved_mem() call. * But for some reason even if it fails we still have the memory reservation * with us and we can still continue doing fadump.
*/ void __init fadump_cma_init(void)
{ unsignedlonglong base, size, end; int rc;
if (!fw_dump.fadump_supported || !fw_dump.fadump_enabled ||
fw_dump.dump_active) return; /* * Do not use CMA if user has provided fadump=nocma kernel parameter.
*/ if (fw_dump.nocma || !fw_dump.boot_memory_size) return;
/* * [base, end) should be reserved during early init in * fadump_reserve_mem(). No need to check this here as * cma_init_reserved_mem() already checks for overlap. * Here we give the aligned chunk of this reserved memory to CMA.
*/
base = fw_dump.reserve_dump_area_start;
size = fw_dump.boot_memory_size;
end = base + size;
base = ALIGN(base, CMA_MIN_ALIGNMENT_BYTES);
end = ALIGN_DOWN(end, CMA_MIN_ALIGNMENT_BYTES);
size = end - base;
if (end <= base) {
pr_warn("%s: Too less memory to give to CMA\n", __func__); return;
}
rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma); if (rc) {
pr_err("Failed to init cma area for firmware-assisted dump,%d\n", rc); /* * Though the CMA init has failed we still have memory * reservation with us. The reserved memory will be * blocked from production system usage. Hence return 1, * so that we can continue with fadump.
*/ return;
}
/* * If CMA activation fails, keep the pages reserved, instead of * exposing them to buddy allocator. Same as 'fadump=nocma' case.
*/
cma_reserve_pages_on_error(fadump_cma);
/* * So we now have successfully initialized cma area for fadump.
*/
pr_info("Initialized [0x%llx, %luMB] cma area from [0x%lx, %luMB] " "bytes of memory reserved for firmware-assisted dump\n",
cma_get_base(fadump_cma), cma_get_size(fadump_cma) >> 20,
fw_dump.reserve_dump_area_start,
fw_dump.boot_memory_size >> 20); return;
} #endif/* CONFIG_CMA */
/* * Additional parameters meant for capture kernel are placed in a dedicated area. * If this is capture kernel boot, append these parameters to bootargs.
*/ void __init fadump_append_bootargs(void)
{ char *append_args;
size_t len;
if (!fw_dump.dump_active || !fw_dump.param_area_supported || !fw_dump.param_area) return;
if (fw_dump.param_area < fw_dump.boot_mem_top) { if (memblock_reserve(fw_dump.param_area, COMMAND_LINE_SIZE)) {
pr_warn("WARNING: Can't use additional parameters area!\n");
fw_dump.param_area = 0; return;
}
}
append_args = (char *)fw_dump.param_area;
len = strlen(boot_command_line);
/* * Too late to fail even if cmdline size exceeds. Truncate additional parameters * to cmdline size and proceed anyway.
*/ if (len + strlen(append_args) >= COMMAND_LINE_SIZE - 1)
pr_warn("WARNING: Appending parameters exceeds cmdline size. Truncating!\n");
/* * If fadump is registered, check if the memory provided * falls within boot memory area and reserved memory area.
*/ int is_fadump_memory_area(u64 addr, unsignedlong size)
{
u64 d_start, d_end;
/* Returns 1 when fadump is registered and its crash header address is set. */
int should_fadump_crash(void)
{
	if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
		return 0;

	return 1;
}
/* Returns non-zero when a firmware-assisted dump is currently active. */
int is_fadump_active(void)
{
	return fw_dump.dump_active;
}
/* * Returns true, if there are no holes in memory area between d_start to d_end, * false otherwise.
*/ staticbool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
phys_addr_t reg_start, reg_end; bool ret = false;
u64 i, start, end;
for_each_mem_range(i, ®_start, ®_end) {
start = max_t(u64, d_start, reg_start);
end = min_t(u64, d_end, reg_end); if (d_start < end) { /* Memory hole from d_start to start */ if (start > d_start) break;
if (end == d_end) {
ret = true; break;
}
d_start = end + 1;
}
}
return ret;
}
/* * Returns true, if there are no holes in reserved memory area, * false otherwise.
*/ bool is_fadump_reserved_mem_contiguous(void)
{
u64 d_start, d_end;
/* Print firmware assisted dump configurations for debugging purpose. */
static void __init fadump_show_config(void)
{
	int i;

	pr_debug("Support for firmware-assisted dump (fadump): %s\n",
		 (fw_dump.fadump_supported ? "present" : "no support"));

	if (!fw_dump.fadump_supported)
		return;

	pr_debug("Fadump enabled : %s\n", str_yes_no(fw_dump.fadump_enabled));
	pr_debug("Dump Active : %s\n", str_yes_no(fw_dump.dump_active));
	pr_debug("Dump section sizes:\n");
	pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
	pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size);
	pr_debug(" Boot memory size : %lx\n", fw_dump.boot_memory_size);
	pr_debug(" Boot memory top : %llx\n", fw_dump.boot_mem_top);
	pr_debug("Boot memory regions cnt: %llx\n", fw_dump.boot_mem_regs_cnt);
	for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
		pr_debug("[%03d] base = %llx, size = %llx\n", i,
			 fw_dump.boot_mem_addr[i], fw_dump.boot_mem_sz[i]);
	}
}
/** * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM * * Function to find the largest memory size we need to reserve during early * boot process. This will be the size of the memory that is required for a * kernel to boot successfully. * * This function has been taken from phyp-assisted dump feature implementation. * * returns larger of 256MB or 5% rounded down to multiples of 256MB. * * TODO: Come up with better approach to find out more accurate memory size * that is required for a kernel to boot successfully. *
*/ static __init u64 fadump_calculate_reserve_size(void)
{
u64 base, size, bootmem_min; int ret;
if (fw_dump.reserve_bootvar)
pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");
/* * Check if the size is specified through crashkernel= cmdline * option. If yes, then use that but ignore base as fadump reserves * memory at a predefined offset.
*/
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&size, &base, NULL, NULL, NULL); if (ret == 0 && size > 0) { unsignedlong max_size;
if (fw_dump.reserve_bootvar)
pr_info("Using 'crashkernel=' parameter for memory reservation.\n");
fw_dump.reserve_bootvar = (unsignedlong)size;
/* * Adjust if the boot memory size specified is above * the upper limit.
*/
max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO; if (fw_dump.reserve_bootvar > max_size) {
fw_dump.reserve_bootvar = max_size;
pr_info("Adjusted boot memory size to %luMB\n",
(fw_dump.reserve_bootvar >> 20));
}
return fw_dump.reserve_bootvar;
} elseif (fw_dump.reserve_bootvar) { /* * 'fadump_reserve_mem=' is being used to reserve memory * for firmware-assisted dump.
*/ return fw_dump.reserve_bootvar;
}
/* divide by 20 to get 5% of value */
size = memblock_phys_mem_size() / 20;
/* round it down in multiples of 256 */
size = size & ~0x0FFFFFFFUL;
/* Truncate to memory_limit. We don't want to over reserve the memory.*/ if (memory_limit && size > memory_limit)
size = memory_limit;
/*
 * Calculate the total memory size required to be reserved for
 * firmware-assisted dump registration.
 */
static unsigned long __init get_fadump_area_size(void)
{
	unsigned long size = 0;

	size += fw_dump.cpu_state_data_size;
	size += fw_dump.hpte_region_size;
	/*
	 * Account for pagesize alignment of boot memory area destination address.
	 * This facilitates in mmap reading of first kernel's memory.
	 */
	size = PAGE_ALIGN(size);
	size += fw_dump.boot_memory_size;
	size += sizeof(struct fadump_crash_info_header);

	/* This is to hold kernel metadata on platforms that support it */
	size += (fw_dump.ops->fadump_get_metadata_size ?
		 fw_dump.ops->fadump_get_metadata_size() : 0);
	return size;
}
staticint __init add_boot_mem_region(unsignedlong rstart, unsignedlong rsize)
{ int max_boot_mem_rgns = fw_dump.ops->fadump_max_boot_mem_rgns(); int i = fw_dump.boot_mem_regs_cnt++;
/* * Firmware usually has a hard limit on the data it can copy per region. * Honour that by splitting a memory range into multiple regions.
*/ staticint __init add_boot_mem_regions(unsignedlong mstart, unsignedlong msize)
{ unsignedlong rstart, rsize, max_size; int ret = 1;
/* * Returns true, if the given range overlaps with reserved memory ranges * starting at idx. Also, updates idx to index of overlapping memory range * with the given memory range. * False, otherwise.
*/ staticbool __init overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{ bool ret = false; int i;
for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
u64 rbase = reserved_mrange_info.mem_ranges[i].base;
u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
if (end <= rbase) break;
if ((end > rbase) && (base < rend)) {
*idx = i;
ret = true; break;
}
}
return ret;
}
/* * Locate a suitable memory area to reserve memory for FADump. While at it, * lookup reserved-ranges & avoid overlap with them, as they are used by F/W.
*/ static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
{ struct fadump_memory_range *mrngs;
phys_addr_t mstart, mend; int idx = 0;
u64 i, ret = 0;
while ((mend > base) && ((mend - base) >= size)) { if (!overlaps_reserved_ranges(base, base+size, &idx)) {
ret = base; goto out;
}
base = mrngs[idx].base + mrngs[idx].size;
base = PAGE_ALIGN(base);
}
}
out: return ret;
}
int __init fadump_reserve_mem(void)
{
u64 base, size, mem_boundary, bootmem_min; int ret = 1;
if (!fw_dump.fadump_enabled) return 0;
if (!fw_dump.fadump_supported) {
pr_info("Firmware-Assisted Dump is not supported on this hardware\n"); goto error_out;
}
/* * Initialize boot memory size * If dump is active then we have already calculated the size during * first kernel.
*/ if (!fw_dump.dump_active) {
fw_dump.boot_memory_size =
PAGE_ALIGN(fadump_calculate_reserve_size());
bootmem_min = fw_dump.ops->fadump_get_bootmem_min(); if (fw_dump.boot_memory_size < bootmem_min) {
pr_err("Can't enable fadump with boot memory size (0x%lx) less than 0x%llx\n",
fw_dump.boot_memory_size, bootmem_min); goto error_out;
}
if (!fadump_get_boot_mem_regions()) {
pr_err("Too many holes in boot memory area to enable fadump\n"); goto error_out;
}
}
if (memory_limit)
mem_boundary = memory_limit; else
mem_boundary = memblock_end_of_DRAM();
base = fw_dump.boot_mem_top;
size = get_fadump_area_size();
fw_dump.reserve_dump_area_size = size; if (fw_dump.dump_active) {
pr_info("Firmware-assisted dump is active.\n");
#ifdef CONFIG_HUGETLB_PAGE /* * FADump capture kernel doesn't care much about hugepages. * In fact, handling hugepages in capture kernel is asking for * trouble. So, disable HugeTLB support when fadump is active.
*/
hugetlb_disabled = true; #endif /* * If last boot has crashed then reserve all the memory * above boot memory size so that we don't touch it until * dump is written to disk by userspace tool. This memory * can be released for general use by invalidating fadump.
*/
fadump_reserve_crash_area(base);
pr_debug("fadumphdr_addr = %#016lx\n", fw_dump.fadumphdr_addr);
pr_debug("Reserve dump area start address: 0x%lx\n",
fw_dump.reserve_dump_area_start);
} else { /* * Reserve memory at an offset closer to bottom of the RAM to * minimize the impact of memory hot-remove operation.
*/
base = fadump_locate_reserve_mem(base, size);
if (!base || (base + size > mem_boundary)) {
pr_err("Failed to find memory chunk for reservation!\n"); goto error_out;
}
fw_dump.reserve_dump_area_start = base;
/* * Calculate the kernel metadata address and register it with * f/w if the platform supports.
*/ if (fw_dump.ops->fadump_setup_metadata &&
(fw_dump.ops->fadump_setup_metadata(&fw_dump) < 0)) goto error_out;
if (memblock_reserve(base, size)) {
pr_err("Failed to reserve memory!\n"); goto error_out;
}
pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
(size >> 20), base, (memblock_phys_mem_size() >> 20));
}
/*
 * Look for fadump_reserve_mem= cmdline option
 * TODO: Remove references to 'fadump_reserve_mem=' parameter,
 *       the sooner 'crashkernel=' parameter is accustomed to.
 */
static int __init early_fadump_reserve_mem(char *p)
{
	if (p)
		fw_dump.reserve_bootvar = memparse(p, &p);
	return 0;
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);
void crash_fadump(struct pt_regs *regs, constchar *str)
{ unsignedint msecs; struct fadump_crash_info_header *fdh = NULL; int old_cpu, this_cpu; /* Do not include first CPU */ unsignedint ncpus = num_online_cpus() - 1;
if (!should_fadump_crash()) return;
/* * old_cpu == -1 means this is the first CPU which has come here, * go ahead and trigger fadump. * * old_cpu != -1 means some other CPU has already on its way * to trigger fadump, just keep looping here.
*/
this_cpu = smp_processor_id();
old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);
if (old_cpu != -1) {
atomic_inc(&cpus_in_fadump);
/* * We can't loop here indefinitely. Wait as long as fadump * is in force. If we race with fadump un-registration this * loop will break and then we go down to normal panic path * and reboot. If fadump is in force the first crashing * cpu will definitely trigger fadump.
*/ while (fw_dump.dump_registered)
cpu_relax(); return;
}
if (regs)
fdh->regs = *regs; else
ppc_save_regs(&fdh->regs);
fdh->cpu_mask = *cpu_online_mask;
/* * If we came in via system reset, wait a while for the secondary * CPUs to enter.
*/ if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) {
msecs = CRASH_TIMEOUT; while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
mdelay(1);
}
memset(&prstatus, 0, sizeof(prstatus)); /* * FIXME: How do i get PID? Do I really need it? * prstatus.pr_pid = ????
*/
elf_core_copy_regs(&prstatus.pr_reg, regs);
buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS,
&prstatus, sizeof(prstatus)); return buf;
}
/* * Fold adjacent memory ranges to bring down the memory ranges/ * PT_LOAD segments count.
*/ if (mrange_info->mem_range_cnt) {
start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
/* * Boot memory area needs separate PT_LOAD segment(s) as it * is moved to a different location at the time of crash. * So, fold only if the region is not boot memory area.
*/ if ((start + size) == base && start >= fw_dump.boot_mem_top)
is_adjacent = true;
} if (!is_adjacent) { /* resize the array on reaching the limit */ if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) { int ret;
if (mrange_info->is_static) {
pr_err("Reached array size limit for %s memory ranges\n",
mrange_info->name); return -ENOSPC;
}
ret = fadump_alloc_mem_ranges(mrange_info); if (ret) return ret;
/* Update to the new resized array */
mem_ranges = mrange_info->mem_ranges;
}
/* * If the given physical address falls within the boot memory region then * return the relocated address that points to the dump region reserved * for saving initial boot memory contents.
*/ staticinlineunsignedlong fadump_relocate(unsignedlong paddr)
{ unsignedlong raddr, rstart, rend, rlast, hole_size; int i;
hole_size = 0;
rlast = 0;
raddr = paddr; for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
rstart = fw_dump.boot_mem_addr[i];
rend = rstart + fw_dump.boot_mem_sz[i];
hole_size += (rstart - rlast);
/* * Set up ELF PT_NOTE, a placeholder for CPU notes information. * The notes info will be populated later by platform-specific code. * Hence, this PT_NOTE will always be the first ELF note. * * NOTE: Any new ELF note addition should be placed after this note.
*/
phdr = (struct elf_phdr *)bufp;
bufp += sizeof(struct elf_phdr);
phdr->p_type = PT_NOTE;
phdr->p_flags = 0;
phdr->p_vaddr = 0;
phdr->p_align = 0;
phdr->p_offset = 0;
phdr->p_paddr = 0;
phdr->p_filesz = 0;
phdr->p_memsz = 0; /* Increment number of program headers. */
(elf->e_phnum)++;
/* * Setup PT_LOAD sections. first include boot memory regions * and then add rest of the memory regions.
*/
boot_mem_dest_offset = fw_dump.boot_mem_dest_addr; for (i = 0; i < fw_dump.boot_mem_regs_cnt; i++) {
phdr = (struct elf_phdr *)bufp;
bufp += sizeof(struct elf_phdr);
populate_elf_pt_load(phdr, fw_dump.boot_mem_addr[i],
fw_dump.boot_mem_sz[i],
boot_mem_dest_offset); /* Increment number of program headers. */
(elf->e_phnum)++;
boot_mem_dest_offset += fw_dump.boot_mem_sz[i];
}
/* Memory reserved for fadump in first kernel */
ra_start = fw_dump.reserve_dump_area_start;
ra_size = get_fadump_area_size();
ra_end = ra_start + ra_size;
phdr = (struct elf_phdr *)bufp;
for_each_mem_range(i, &mstart, &mend) { /* Boot memory regions already added, skip them now */ if (mstart < fw_dump.boot_mem_top) { if (mend > fw_dump.boot_mem_top)
mstart = fw_dump.boot_mem_top; else continue;
}
/* Handle memblock regions overlaps with fadump reserved area */ if ((ra_start < mend) && (ra_end > mstart)) { if ((mstart < ra_start) && (mend > ra_end)) {
populate_elf_pt_load(phdr, mstart, ra_start - mstart, mstart); /* Increment number of program headers. */
(elf->e_phnum)++;
bufp += sizeof(struct elf_phdr);
phdr = (struct elf_phdr *)bufp;
populate_elf_pt_load(phdr, ra_end, mend - ra_end, ra_end);
} elseif (mstart < ra_start) {
populate_elf_pt_load(phdr, mstart, ra_start - mstart, mstart);
} elseif (ra_end < mend) {
populate_elf_pt_load(phdr, ra_end, mend - ra_end, ra_end);
}
} else { /* No overlap with fadump reserved memory region */
populate_elf_pt_load(phdr, mstart, mend - mstart, mstart);
}
/* Increment number of program headers. */
(elf->e_phnum)++;
bufp += sizeof(struct elf_phdr);
phdr = (struct elf_phdr *) bufp;
}
}
memset(fdh, 0, sizeof(struct fadump_crash_info_header));
fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
fdh->version = FADUMP_HEADER_VERSION; /* We will set the crashing cpu id in crash_fadump() during crash. */
fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
/* * The physical address and size of vmcoreinfo are required in the * second kernel to prepare elfcorehdr.
*/
fdh->vmcoreinfo_raddr = fadump_relocate(paddr_vmcoreinfo_note());
fdh->vmcoreinfo_size = VMCOREINFO_NOTE_SIZE;
fdh->pt_regs_sz = sizeof(struct pt_regs); /* * When LPAR is terminated by PYHP, ensure all possible CPUs' * register data is processed while exporting the vmcore.
*/
fdh->cpu_mask = *cpu_possible_mask;
fdh->cpu_mask_sz = sizeof(struct cpumask);
/* * If no memory is reserved then we can not register for firmware- * assisted dump.
*/ if (!fw_dump.reserve_dump_area_size) return -ENODEV;
addr = fw_dump.fadumphdr_addr;
/* Initialize fadump crash info header. */
addr = init_fadump_header(addr);
/* register the future kernel dump with firmware. */
pr_debug("Registering for firmware-assisted kernel dump...\n"); return fw_dump.ops->fadump_register(&fw_dump);
}
/* Invalidate or un-register the dump and let the platform clean up. */
void fadump_cleanup(void)
{
	if (!fw_dump.fadump_supported)
		return;

	/* Invalidate the registration only if dump is active. */
	if (fw_dump.dump_active) {
		pr_debug("Invalidating firmware-assisted dump registration\n");
		fw_dump.ops->fadump_invalidate(&fw_dump);
	} else if (fw_dump.dump_registered) {
		/* Un-register Firmware-assisted dump if it was registered. */
		fw_dump.ops->fadump_unregister(&fw_dump);
	}

	if (fw_dump.ops->fadump_cleanup)
		fw_dump.ops->fadump_cleanup(&fw_dump);
}
if (tstart < tend) {
fadump_free_reserved_memory(tstart, tend);
if (tend == epfn) break;
spfn = tend;
}
}
}
/* * Sort the mem ranges in-place and merge adjacent ranges * to minimize the memory ranges count.
*/ staticvoid sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{ struct fadump_memory_range *mem_ranges;
u64 base, size; int i, j, idx;
if (!reserved_mrange_info.mem_range_cnt) return;
/* Sort the memory ranges */
mem_ranges = mrange_info->mem_ranges; for (i = 0; i < mrange_info->mem_range_cnt; i++) {
idx = i; for (j = (i + 1); j < mrange_info->mem_range_cnt; j++) { if (mem_ranges[idx].base > mem_ranges[j].base)
idx = j;
} if (idx != i)
swap(mem_ranges[idx], mem_ranges[i]);
}
/* Merge adjacent reserved ranges */
idx = 0; for (i = 1; i < mrange_info->mem_range_cnt; i++) {
base = mem_ranges[i-1].base;
size = mem_ranges[i-1].size; if (mem_ranges[i].base == (base + size))
mem_ranges[idx].size += mem_ranges[i].size; else {
idx++; if (i == idx) continue;
/* * Scan reserved-ranges to consider them while reserving/releasing * memory for FADump.
*/ staticvoid __init early_init_dt_scan_reserved_ranges(unsignedlong node)
{ const __be32 *prop; int len, ret = -1; unsignedlong i;
/* reserved-ranges already scanned */ if (reserved_mrange_info.mem_range_cnt != 0) return;
prop = of_get_flat_dt_prop(node, "reserved-ranges", &len); if (!prop) return;
/* * Each reserved range is an (address,size) pair, 2 cells each, * totalling 4 cells per range.
*/ for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
u64 base, size;
base = of_read_number(prop + (i * 4) + 0, 2);
size = of_read_number(prop + (i * 4) + 2, 2);
if (size) {
ret = fadump_add_mem_range(&reserved_mrange_info,
base, base + size); if (ret < 0) {
pr_warn("some reserved ranges are ignored!\n"); break;
}
}
}
/*
 * Release the memory that was reserved during early boot to preserve the
 * crash'ed kernel's memory contents except reserved dump area (permanent
 * reservation) and reserved ranges used by F/W. The released memory will
 * be available for general use.
 */
static void fadump_release_memory(u64 begin, u64 end)
{
	u64 ra_start, ra_end, tstart;
	int i, ret;

	/*
	 * Fix: the extracted code used ra_start/ra_end uninitialized.
	 * They must describe the permanent reserve dump area so it is
	 * excluded from the release below.
	 */
	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;

	/*
	 * If reserved ranges array limit is hit, overwrite the last reserved
	 * memory range with reserved dump area to ensure it is excluded from
	 * the memory being released (reused for next FADump registration).
	 */
	if (reserved_mrange_info.mem_range_cnt ==
	    reserved_mrange_info.max_mem_ranges)
		reserved_mrange_info.mem_range_cnt--;

	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
	if (ret != 0)
		return;

	/* Get the reserved ranges list in order first. */
	sort_and_merge_mem_ranges(&reserved_mrange_info);

	/* Exclude reserved ranges and release remaining memory */
	tstart = begin;
	for (i = 0; i < reserved_mrange_info.mem_range_cnt; i++) {
		ra_start = reserved_mrange_info.mem_ranges[i].base;
		ra_end = ra_start + reserved_mrange_info.mem_ranges[i].size;

		if (tstart >= ra_end)
			continue;

		if (tstart < ra_start)
			fadump_release_reserved_area(tstart, ra_start);
		tstart = ra_end;
	}

	if (tstart < end)
		fadump_release_reserved_area(tstart, end);
}
/* * Before freeing the memory of `elfcorehdr`, reset the global * `elfcorehdr_addr` to prevent modules like `vmcore` from accessing * invalid memory.
*/
elfcorehdr_addr = ELFCORE_ADDR_ERR;
fadump_free_buffer(fw_dump.elfcorehdr_addr, fw_dump.elfcorehdr_size);
fw_dump.elfcorehdr_addr = 0;
fw_dump.elfcorehdr_size = 0;
}
if (input == 1) { /* * Take away the '/proc/vmcore'. We are releasing the dump * memory, hence it will not be valid anymore.
*/ #ifdef CONFIG_PROC_VMCORE
vmcore_cleanup(); #endif
fadump_invalidate_release_mem();
} else return -EINVAL; return count;
}
/* Release the reserved memory and disable the FADump */
static void __init unregister_fadump(void)
{
	fadump_cleanup();
	/*
	 * NOTE(review): second argument is the reserve area *size* while
	 * fadump_release_memory() takes an end address — confirm against
	 * the fadump_release_memory() contract.
	 */
	fadump_release_memory(fw_dump.reserve_dump_area_start,
			      fw_dump.reserve_dump_area_size);
	fw_dump.fadump_enabled = 0;
	kobject_put(fadump_kobj);
}
if (!fw_dump.fadump_enabled || fw_dump.dump_active) return -EPERM;
if (count >= COMMAND_LINE_SIZE) return -EINVAL;
/* * Fail here instead of handling this scenario with * some silly workaround in capture kernel.
*/ if (saved_command_line_len + count >= COMMAND_LINE_SIZE) {
pr_err("Appending parameters exceeds cmdline size!\n"); return -ENOSPC;
}
params = __va(fw_dump.param_area);
strscpy_pad(params, buf, COMMAND_LINE_SIZE); /* Remove newline character at the end. */ if (params[count-1] == '\n')
params[count-1] = '\0';
return count;
}
/*
 * sysfs store: '0' un-registers fadump, '1' (re-)registers it.
 * Rejected while a dump is active or fadump is disabled.
 */
static ssize_t registered_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int ret = 0;
	int input = -1;

	if (!fw_dump.fadump_enabled || fw_dump.dump_active)
		return -EPERM;

	if (kstrtoint(buf, 0, &input))
		return -EINVAL;

	mutex_lock(&fadump_mutex);

	switch (input) {
	case 0:
		if (fw_dump.dump_registered == 0) {
			goto unlock_out;
		}

		/* Un-register Firmware-assisted dump */
		pr_debug("Un-register firmware-assisted dump\n");
		fw_dump.ops->fadump_unregister(&fw_dump);
		break;
	case 1:
		if (fw_dump.dump_registered == 1) {
			/* Un-register Firmware-assisted dump */
			fw_dump.ops->fadump_unregister(&fw_dump);
		}
		/* Register Firmware-assisted dump */
		ret = register_fadump();
		break;
	default:
		ret = -EINVAL;
		break;
	}

unlock_out:
	mutex_unlock(&fadump_mutex);
	return ret < 0 ? ret : count;
}
if (fw_dump.dump_active) {
rc = sysfs_create_file(fadump_kobj, &release_attr.attr); if (rc)
pr_err("unable to create release_mem sysfs file (%d)\n",
rc);
}
rc = sysfs_create_groups(fadump_kobj, fadump_groups); if (rc) {
pr_err("sysfs group creation failed (%d), unregistering FADump",
rc);
unregister_fadump(); return;
}
/* * The FADump sysfs are moved from kernel_kobj to fadump_kobj need to * create symlink at old location to maintain backward compatibility. * * - fadump_enabled -> fadump/enabled * - fadump_registered -> fadump/registered * - fadump_release_mem -> fadump/release_mem
*/
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj, "enabled", "fadump_enabled"); if (rc) {
pr_err("unable to create fadump_enabled symlink (%d)", rc); return;
}
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj, "registered", "fadump_registered"); if (rc) {
pr_err("unable to create fadump_registered symlink (%d)", rc);
sysfs_remove_link(kernel_kobj, "fadump_enabled"); return;
}
if (fw_dump.dump_active) {
rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
fadump_kobj, "release_mem", "fadump_release_mem"); if (rc)
pr_err("unable to create fadump_release_mem symlink (%d)",
rc);
} return;
}
/* Allocate a page-aligned buffer large enough to hold the elfcorehdr. */
static int __init fadump_setup_elfcorehdr_buf(void)
{
	int elf_phdr_cnt;
	unsigned long elfcorehdr_size;

	/*
	 * Program header for CPU notes comes first, followed by one for
	 * vmcoreinfo, and the remaining program headers correspond to
	 * memory regions.
	 */
	elf_phdr_cnt = 2 + fw_dump.boot_mem_regs_cnt + memblock_num_regions(memory);
	elfcorehdr_size = sizeof(struct elfhdr) + (elf_phdr_cnt * sizeof(struct elf_phdr));
	elfcorehdr_size = PAGE_ALIGN(elfcorehdr_size);

	fw_dump.elfcorehdr_addr = (u64)fadump_alloc_buffer(elfcorehdr_size);
	if (!fw_dump.elfcorehdr_addr) {
		pr_err("Failed to allocate %lu bytes for elfcorehdr\n",
		       elfcorehdr_size);
		return -ENOMEM;
	}
	fw_dump.elfcorehdr_size = elfcorehdr_size;
	return 0;
}
/* * Check if the fadump header of crashed kernel is compatible with fadump kernel. * * It checks the magic number, endianness, and size of non-primitive type * members of fadump header to ensure safe dump collection.
*/ staticbool __init is_fadump_header_compatible(struct fadump_crash_info_header *fdh)
{ if (fdh->magic_number == FADUMP_CRASH_INFO_MAGIC_OLD) {
pr_err("Old magic number, can't process the dump.\n"); returnfalse;
}
if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { if (fdh->magic_number == swab64(FADUMP_CRASH_INFO_MAGIC))
pr_err("Endianness mismatch between the crashed and fadump kernels.\n"); else
pr_err("Fadump header is corrupted.\n");
returnfalse;
}
/* * Dump collection is not safe if the size of non-primitive type members * of the fadump header do not match between crashed and fadump kernel.
*/ if (fdh->pt_regs_sz != sizeof(struct pt_regs) ||
fdh->cpu_mask_sz != sizeof(struct cpumask)) {
pr_err("Fadump header size mismatch.\n"); returnfalse;
}
fdh = (struct fadump_crash_info_header *) __va(fw_dump.fadumphdr_addr); if (!fdh) {
pr_err("Crash info header is empty.\n"); goto err_out;
}
/* Avoid processing the dump if fadump header isn't compatible */ if (!is_fadump_header_compatible(fdh)) goto err_out;
/* Allocate buffer for elfcorehdr */ if (fadump_setup_elfcorehdr_buf()) goto err_out;
fadump_populate_elfcorehdr(fdh);
/* Let platform update the CPU notes in elfcorehdr */ if (fw_dump.ops->fadump_process(&fw_dump) < 0) goto err_out;
/* * elfcorehdr is now ready to be exported. * * set elfcorehdr_addr so that vmcore module will export the * elfcorehdr through '/proc/vmcore'.
*/
elfcorehdr_addr = virt_to_phys((void *)fw_dump.elfcorehdr_addr); return;
err_out:
fadump_invalidate_release_mem();
}
/* * Reserve memory to store additional parameters to be passed * for fadump/capture kernel.
*/ void __init fadump_setup_param_area(void)
{
phys_addr_t range_start, range_end;
if (!fw_dump.fadump_enabled) return;
if (!fw_dump.param_area_supported || fw_dump.dump_active) return;
/* This memory can't be used by PFW or bootloader as it is shared across kernels */ if (early_radix_enabled()) { /* * Anywhere in the upper half should be good enough as all memory * is accessible in real mode.
*/
range_start = memblock_end_of_DRAM() / 2;
range_end = memblock_end_of_DRAM();
} else { /* * Memory range for passing additional parameters for HASH MMU * must meet the following conditions: * 1. The first memory block size must be higher than the * minimum RMA (MIN_RMA) size. Bootloader can use memory * upto RMA size. So it should be avoided. * 2. The range should be between MIN_RMA and RMA size (ppc64_rma_size) * 3. It must not overlap with the fadump reserved area.
*/ if (ppc64_rma_size < MIN_RMA*1024*1024) return;
fw_dump.param_area = memblock_phys_alloc_range(COMMAND_LINE_SIZE,
COMMAND_LINE_SIZE,
range_start,
range_end); if (!fw_dump.param_area) {
pr_warn("WARNING: Could not setup area to pass additional parameters!\n"); return;
}
/*
 * Prepare for firmware-assisted dump.
 */
int __init setup_fadump(void)
{
	if (!fw_dump.fadump_supported)
		return 0;

	fadump_init_files();
	fadump_show_config();

	if (!fw_dump.fadump_enabled)
		return 1;

	/*
	 * If dump data is available then see if it is valid and prepare for
	 * saving it to the disk.
	 */
	if (fw_dump.dump_active) {
		fadump_process();
	}
	/* Initialize the kernel dump memory structure and register with f/w */
	else if (fw_dump.reserve_dump_area_size) {
		fw_dump.ops->fadump_init_mem_struct(&fw_dump);
		register_fadump();
	}

	/*
	 * In case of panic, fadump is triggered via ppc_panic_event()
	 * panic notifier. Setting crash_kexec_post_notifiers to 'true'
	 * lets panic() function take crash friendly path before panic
	 * notifiers are invoked.
	 */
	crash_kexec_post_notifiers = true;

	return 1;
}
/*
 * Use subsys_initcall_sync() here because there is dependency with
 * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo
 * initialization is done before registering with f/w.
 */
*/
subsys_initcall_sync(setup_fadump); #else/* !CONFIG_PRESERVE_FA_DUMP */
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
	/* Only the top-level "ibm,opal" node carries fadump properties. */
	if ((depth != 1) || (strcmp(uname, "ibm,opal") != 0))
		return 0;

	opal_fadump_dt_scan(&fw_dump, node);
	return 1;
}
/* * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel, * preserve crash data. The subsequent memory preserving kernel boot * is likely to process this crash data.
*/ int __init fadump_reserve_mem(void)
{ if (fw_dump.dump_active) { /* * If last boot has crashed then reserve all the memory * above boot memory to preserve crash data.
*/
pr_info("Preserving crash data for processing in next boot.\n");
fadump_reserve_crash_area(fw_dump.boot_mem_top);
} else
pr_debug("FADump-aware kernel..\n");
return 1;
} #endif/* CONFIG_PRESERVE_FA_DUMP */
/* Preserve everything above the base address */ staticvoid __init fadump_reserve_crash_area(u64 base)
{
u64 i, mstart, mend, msize;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.