/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwinded
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwinded in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */
/* * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs * * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to * allow for guard pages below the stack. Consequently, the fixed offset address * translation macros won't work here. * * The kernel VA is calculated as an offset from the kernel VA of the hypervisor * stack base. * * Returns true on success and updates @addr to its corresponding kernel VA; * otherwise returns false.
*/ staticbool kvm_nvhe_stack_kern_va(unsignedlong *addr, unsignedlong size)
{ struct stack_info stack_hyp, stack_kern;
/* * Convert a KVN nVHE HYP frame record address to a kernel VA
*/ staticbool kvm_nvhe_stack_kern_record_va(unsignedlong *addr)
{ return kvm_nvhe_stack_kern_va(addr, 16);
}
/*
 * unwind_next - Advance the unwind state to the next frame record.
 *
 * Returns 0 on success, or -EINVAL if the frame pointer cannot be
 * translated to a kernel VA.
 */
static int unwind_next(struct unwind_state *state)
{
	/*
	 * The FP is in the hypervisor VA space. Convert it to the kernel VA
	 * space so it can be unwound by the regular unwind functions.
	 */
	if (!kvm_nvhe_stack_kern_record_va(&state->fp))
		return -EINVAL;

	return unwind_next_frame_record(state);
}
/*
 * unwind - Walk the stack, feeding each PC to @consume_entry.
 *
 * Stops when @consume_entry returns false, or when the next frame
 * cannot be unwound.
 */
static void unwind(struct unwind_state *state,
		   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
/* * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry * * @arg : the hypervisor offset, used for address translation * @where : the program counter corresponding to the stack frame
*/ staticbool kvm_nvhe_dump_backtrace_entry(void *arg, unsignedlong where)
{ unsignedlong va_mask = GENMASK_ULL(__hyp_va_bits - 1, 0); unsignedlong hyp_offset = (unsignedlong)arg;
/* Mask tags and convert to kern addr */
where = (where & va_mask) + hyp_offset;
kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
/*
 * kvm_nvhe_dump_backtrace_start - Print the nVHE backtrace header line.
 *
 * NOTE(review): this helper is called below but its definition was lost in
 * the copy under review; restored from the upstream source — confirm the
 * header string against the tree this file was taken from.
 */
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}

/*
 * kvm_nvhe_dump_backtrace_end - Print the nVHE backtrace trailer line.
 */
static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}
/* * hyp_dump_backtrace - Dump the non-protected nVHE backtrace. * * @hyp_offset: hypervisor offset, used for address translation. * * The host can directly access HYP stack pages in non-protected * mode, so the unwinding is done directly from EL1. This removes * the need for shared buffers between host and hypervisor for * the stacktrace.
*/ staticvoid hyp_dump_backtrace(unsignedlong hyp_offset)
{ struct kvm_nvhe_stacktrace_info *stacktrace_info; struct stack_info stacks[] = {
stackinfo_get_overflow_kern_va(),
stackinfo_get_hyp_kern_va(),
}; struct unwind_state state = {
.stacks = stacks,
.nr_stacks = ARRAY_SIZE(stacks),
};
/*
 * Fix: the opening #ifdef was lost in the copy under review, leaving the
 * #else/#endif below unmatched; restored so the conditional compiles.
 */
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *stacktrace
		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int i;

	kvm_nvhe_dump_backtrace_start();
	/* The saved stacktrace is terminated by a null entry */
	for (i = 0;
	     i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
	     i++)
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
	kvm_nvhe_dump_backtrace_end();
}
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif	/* CONFIG_PROTECTED_NVHE_STACKTRACE */
/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	/* pKVM unwinds in EL2 into a shared buffer; nVHE unwinds here in EL1 */
	if (is_protected_kvm_enabled())
		pkvm_dump_backtrace(hyp_offset);
	else
		hyp_dump_backtrace(hyp_offset);
}
/*
 * NOTE(review): the text below is residue from the web page this file was
 * extracted from (a German syntax-highlighting site's footer: processing-time
 * measurement and content disclaimer). It is not part of the source file and
 * should be removed; fenced in a comment here so the file remains valid C.
 *
 * Messung V0.5
 * - Dauer der Verarbeitung: 0.27 Sekunden (vorverarbeitet)
 * - Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *   sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit,
 *   noch Richtigkeit, noch Qualitaet der bereitgestellten Informationen
 *   zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */