// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	/* boot_params of the crash (second) kernel being filled in */
	struct boot_params *params;
	/* Type of memory (E820_TYPE_*) for the entry being added */
	unsigned int type;
};
/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	/* Guard so repeated panic paths only stop the other CPUs once. */
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	/*
	 * Prefer the crash-specific stop (NMI based, saves register state)
	 * when the platform provides one; otherwise fall back to the
	 * generic IPI-based smp_send_stop().
	 */
	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}
#else void crash_smp_send_stop(void)
{ /* There are no cpus to shootdown */
} #endif
void native_machine_crash_shutdown(struct pt_regs *regs)
{ /* This function is only called after the system * has panicked or is otherwise in a critical state. * The minimum amount of code to allow a kexec'd kernel * to run successfully needs to happen here. * * In practice this means shooting down the other cpus in * an SMP system.
*/ /* The kernel is broken so disable interrupts */
local_irq_disable();
crash_smp_send_stop();
cpu_emergency_disable_virtualization();
/* * Disable Intel PT to stop its logging
*/
cpu_emergency_stop_pt();
#ifdef CONFIG_X86_IO_APIC /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
ioapic_zap_locks();
clear_IO_APIC(); #endif
lapic_shutdown();
restore_boot_irq_mode(); #ifdef CONFIG_HPET_TIMER
hpet_disable(); #endif
/* * Non-crash kexec calls enc_kexec_begin() while scheduling is still * active. This allows the callback to wait until all in-flight * shared<->private conversions are complete. In a crash scenario, * enc_kexec_begin() gets called after all but one CPU have been shut * down and interrupts have been disabled. This allows the callback to * detect a race with the conversion and report it.
*/
x86_platform.guest.enc_kexec_begin();
x86_platform.guest.enc_kexec_finish();
/* Gather all the required information to prepare elf headers for ram regions */ staticstruct crash_mem *fill_up_crash_elf_data(void)
{ unsignedint nr_ranges = 0; struct crash_mem *cmem;
walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); if (!nr_ranges) return NULL;
/* * Exclusion of crash region, crashk_low_res and/or crashk_cma_ranges * may cause range splits. So add extra slots here.
*/
nr_ranges += 2 + crashk_cma_cnt;
cmem = vzalloc(struct_size(cmem, ranges, nr_ranges)); if (!cmem) return NULL;
/* * Look for any unwanted ranges between mstart, mend and remove them. This * might lead to split and split ranges are put in cmem->ranges[] array
*/ staticint elf_header_exclude_ranges(struct crash_mem *cmem)
{ int ret = 0; int i;
/* Exclude the low 1M because it is always reserved */
ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1); if (ret) return ret;
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); if (ret) return ret;
if (crashk_low_res.end)
ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
crashk_low_res.end); if (ret) return ret;
for (i = 0; i < crashk_cma_cnt; ++i) {
ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
crashk_cma_ranges[i].end); if (ret) return ret;
}
/* Exclude elf header region */
start = image->elf_load_addr;
end = start + image->elf_headers_sz - 1;
ret = crash_exclude_mem_range(cmem, start, end);
if (ret) return ret;
/* Exclude dm crypt keys region */ if (image->dm_crypt_keys_addr) {
start = image->dm_crypt_keys_addr;
end = start + image->dm_crypt_keys_sz - 1; return crash_exclude_mem_range(cmem, start, end);
}
return ret;
}
/* Prepare memory map for crash dump kernel */ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{ unsignedint nr_ranges = 0; int i, ret = 0; unsignedlong flags; struct e820_entry ei; struct crash_memmap_data cmd; struct crash_mem *cmem;
/* * Using random kexec_buf for passing dm crypt keys may cause a range * split. So use two slots here.
*/
nr_ranges = 2;
cmem = vzalloc(struct_size(cmem, ranges, nr_ranges)); if (!cmem) return -ENOMEM;
/* Add crashk_low_res region */ if (crashk_low_res.end) {
ei.addr = crashk_low_res.start;
ei.size = resource_size(&crashk_low_res);
ei.type = E820_TYPE_RAM;
add_e820_entry(params, &ei);
}
/* Exclude some ranges from crashk_res and add rest to memmap */
ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end); if (ret) goto out;
for (i = 0; i < cmem->nr_ranges; i++) {
ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;
/* If entry is less than a page, skip it */ if (ei.size < PAGE_SIZE) continue;
ei.addr = cmem->ranges[i].start;
ei.type = E820_TYPE_RAM;
add_e820_entry(params, &ei);
}
for (i = 0; i < crashk_cma_cnt; ++i) {
ei.addr = crashk_cma_ranges[i].start;
ei.size = crashk_cma_ranges[i].end -
crashk_cma_ranges[i].start + 1;
ei.type = E820_TYPE_RAM;
add_e820_entry(params, &ei);
}
int arch_crash_hotplug_support(struct kimage *image, unsignedlong kexec_flags)
{
#ifdef CONFIG_KEXEC_FILE if (image->file_mode) return 1; #endif /* * Initially, crash hotplug support for kexec_load was added * with the KEXEC_UPDATE_ELFCOREHDR flag. Later, this * functionality was expanded to accommodate multiple kexec * segment updates, leading to the introduction of the * KEXEC_CRASH_HOTPLUG_SUPPORT kexec flag bit. Consequently, * when the kexec tool sends either of these flags, it indicates * that the required kexec segment (elfcorehdr) is excluded from * the SHA calculation.
*/ return (kexec_flags & KEXEC_UPDATE_ELFCOREHDR ||
kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT);
}
/* kernel_map, VMCOREINFO and maximum CPUs */
sz = 2 + CONFIG_NR_CPUS_DEFAULT; if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
sz += CONFIG_CRASH_MAX_MEMORY_RANGES;
sz *= sizeof(Elf64_Phdr); return sz;
}
/** * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes * @image: a pointer to kexec_crash_image * @arg: struct memory_notify handler for memory hotplug case and * NULL for CPU hotplug case. * * Prepare the new elfcorehdr and replace the existing elfcorehdr.
*/ void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
{ void *elfbuf = NULL, *old_elfcorehdr; unsignedlong nr_mem_ranges; unsignedlong mem, memsz; unsignedlong elfsz = 0;
/* * As crash_prepare_elf64_headers() has already described all * possible CPUs, there is no need to update the elfcorehdr * for additional CPU changes.
*/ if ((image->file_mode || image->elfcorehdr_updated) &&
((image->hp_action == KEXEC_CRASH_HP_ADD_CPU) ||
(image->hp_action == KEXEC_CRASH_HP_REMOVE_CPU))) return;
/* * Create the new elfcorehdr reflecting the changes to CPU and/or * memory resources.
*/ if (prepare_elf_headers(&elfbuf, &elfsz, &nr_mem_ranges)) {
pr_err("unable to create new elfcorehdr"); goto out;
}
/* * Obtain address and size of the elfcorehdr segment, and * check it against the new elfcorehdr buffer.
*/
mem = image->segment[image->elfcorehdr_index].mem;
memsz = image->segment[image->elfcorehdr_index].memsz; if (elfsz > memsz) {
pr_err("update elfcorehdr elfsz %lu > memsz %lu",
elfsz, memsz); goto out;
}
/* * Copy new elfcorehdr over the old elfcorehdr at destination.
*/
old_elfcorehdr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT)); if (!old_elfcorehdr) {
pr_err("mapping elfcorehdr segment failed\n"); goto out;
}
/* * Temporarily invalidate the crash image while the * elfcorehdr is updated.
*/
xchg(&kexec_crash_image, NULL);
memcpy_flushcache(old_elfcorehdr, elfbuf, elfsz);
xchg(&kexec_crash_image, image);
kunmap_local(old_elfcorehdr);
pr_debug("updated elfcorehdr\n");
out:
vfree(elfbuf);
} #endif