machine->id_hdr_size = 0;
machine->kptr_restrict_warned = false;
machine->comm_exec = false;
machine->kernel_start = 0;
machine->vmlinux_map = NULL; /* There is no initial context switch in, so we start at 1. */
machine->parallelism = 1;
machine->root_dir = strdup(root_dir); if (machine->root_dir == NULL) goto out;
/*
 * Create a host machine and populate it with kernel symbols parsed from
 * /proc/kallsyms.
 *
 * Returns the new machine, or NULL if the machine could not be created or
 * kallsyms could not be loaded (in which case the partially constructed
 * machine is destroyed).
 */
struct machine *machine__new_kallsyms(struct perf_env *host_env)
{
	struct machine *machine = machine__new_host(host_env);

	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}
/*
 * A common case for KVM test programs is that the test program acts as the
 * hypervisor, creating, running and destroying the virtual machine, and
 * providing the guest object code from its own object code. In this case,
 * the VM is not running an OS, but only the functions loaded into it by the
 * hypervisor test program, and conveniently, loaded at the same virtual
 * addresses.
 *
 * Normally to resolve addresses, MMAP events are needed to map addresses
 * back to the object code and debug symbols for that object code.
 *
 * Currently, there is no way to get such mapping information from guests
 * but, in the scenario described above, the guest has the same mappings
 * as the hypervisor, so support for that scenario can be achieved.
 *
 * To support that, copy the host thread's maps to the guest thread's maps.
 * Note, we do not discover the guest until we encounter a guest event,
 * which works well because it is not until then that we know that the host
 * thread's maps have been set up.
 *
 * This function returns the guest thread. Apart from keeping the data
 * structures sane, using a thread belonging to the guest machine, instead
 * of the host thread, allows it to have its own comm (refer
 * thread__set_guest_comm()).
 */
static struct thread *findnew_guest_code(struct machine *machine,
					 struct machine *host_machine,
					 pid_t pid)
{
	struct thread *host_thread;
	struct thread *thread;
	int err;

	if (!machine)
		return NULL;

	thread = machine__findnew_thread(machine, -1, pid);
	if (!thread)
		return NULL;

	/* Assume maps are set up if there are any */
	if (!maps__empty(thread__maps(thread)))
		return thread;

	host_thread = machine__find_thread(host_machine, -1, pid);
	if (!host_thread)
		goto out_err;

	thread__set_guest_comm(thread, pid);

	/*
	 * Guest code can be found in hypervisor process at the same address
	 * so copy host maps.
	 */
	err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
	thread__put(host_thread);
	if (err)
		goto out_err;

	return thread;

out_err:
	thread__put(thread);
	return NULL;
}
/* * Caller must eventually drop thread->refcnt returned with a successful * lookup/new thread inserted.
*/ staticstruct thread *__machine__findnew_thread(struct machine *machine,
pid_t pid,
pid_t tid, bool create)
{ struct thread *th = threads__find(&machine->threads, tid); bool created;
if (th) {
machine__update_thread_pid(machine, th, pid); return th;
} if (!create) return NULL;
th = threads__findnew(&machine->threads, pid, tid, &created); if (created) { /* * We have to initialize maps separately after rb tree is * updated. * * The reason is that we call machine__findnew_thread within * thread__init_maps to find the thread leader and that would * screwed the rb tree.
*/ if (thread__init_maps(th, machine)) {
pr_err("Thread init failed thread %d\n", pid);
threads__remove(&machine->threads, th);
thread__put(th); return NULL;
}
} else
machine__update_thread_pid(machine, th, pid);
/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
struct thread *machine__idle_thread(struct machine *machine)
{
	struct thread *idle = machine__findnew_thread(machine, 0, 0);
	bool failed = idle == NULL;

	/* Short-circuit exactly like the original chained condition. */
	if (!failed && thread__set_comm(idle, "swapper", 0))
		failed = true;
	if (!failed && thread__set_namespaces(idle, 0, NULL))
		failed = true;

	if (failed)
		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);

	return idle;
}
/*
 * Pick the comm to report for @thread: the exec comm when this machine has
 * observed COMM events flagged with PERF_RECORD_MISC_COMM_EXEC, otherwise
 * the plain comm.
 */
struct comm *machine__thread_exec_comm(struct machine *machine, struct thread *thread)
{
	return machine->comm_exec ? thread__exec_comm(thread) : thread__comm(thread);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event, struct perf_sample *sample)
{ struct thread *thread = machine__findnew_thread(machine,
event->comm.pid,
event->comm.tid); bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; int err = 0;
if (exec)
machine->comm_exec = true;
if (dump_trace)
perf_event__fprintf_comm(event, stdout);
int machine__process_namespaces_event(struct machine *machine __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused)
{ struct thread *thread = machine__findnew_thread(machine,
event->namespaces.pid,
event->namespaces.tid); int err = 0;
WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES, "\nWARNING: kernel seems to support more namespaces than perf" " tool.\nTry updating the perf tool..\n\n");
WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES, "\nWARNING: perf tool seems to support more namespaces than" " the kernel.\nTry updating the kernel..\n\n");
if (dump_trace)
perf_event__fprintf_namespaces(event, stdout);
/*
 * PERF_RECORD_AUX handler: no machine state to update, just dump the event
 * when event tracing is enabled.
 */
int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);

	return 0;
}
/*
 * PERF_RECORD_ITRACE_START handler: no machine state to update, just dump
 * the event when event tracing is enabled.
 */
int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);

	return 0;
}
/*
 * PERF_RECORD_AUX_OUTPUT_HW_ID handler: no machine state to update, just
 * dump the event when event tracing is enabled.
 */
int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
					    union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux_output_hw_id(event, stdout);

	return 0;
}
int machine__process_switch_event(struct machine *machine __maybe_unused, union perf_event *event)
{ bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
if (dump_trace)
perf_event__fprintf_switch(event, stdout);
machine->parallelism += out ? -1 : 1; return 0;
}
/*
 * NOTE(review): this block appears to be a fusion of two different functions
 * from perf's machine.c — the signature and declarations match
 * machine__process_ksymbol_register(), while the whole body operates on
 * event->text_poke like machine__process_text_poke(). Confirm against the
 * upstream file; as written, 'dso' is initialized to NULL and never set, so
 * the if (dso) branch is dead, 'sym' and 'err' are unused, and the 'out'
 * label is unreferenced.
 */
staticint machine__process_ksymbol_register(struct machine *machine, union perf_event *event, struct perf_sample *sample __maybe_unused)
{ struct symbol *sym; struct dso *dso = NULL; struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); int err = 0;
/* Dead branch: dso is NULL at this point — TODO confirm intended source. */
if (dso) {
u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len; int ret;
/* * Kernel maps might be changed when loading symbols so loading * must be done prior to using kernel maps.
*/
map__load(map);
/* Write the poked bytes into the dso data cache at the poked address. */
ret = dso__data_write_cache_addr(dso, map, machine,
event->text_poke.addr,
new_bytes,
event->text_poke.new_len); if (ret != event->text_poke.new_len)
pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
event->text_poke.addr);
} else {
pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
event->text_poke.addr);
}
out:
map__put(map); return 0;
}
/*
 * NOTE(review): only the head of machine__get_running_kernel_start() is
 * present here; the body from 'if (!err)' onward (kmap/map/pr_debug2)
 * belongs to a different function that creates an extra kernel map — the
 * middle of both functions was lost in extraction. 'filename', 'i', 'name'
 * and 'addr' are declared but never used in what remains, and 'kmap'/'map'
 * are used but never declared. Reconstruct from the upstream file.
 */
/* Figure out the start address of kernel map from /proc/kallsyms. * Returns the name of the start symbol in *symbol_name. Pass in NULL as * symbol_name if it's not that important.
*/ staticint machine__get_running_kernel_start(struct machine *machine, constchar **symbol_name,
u64 *start, u64 *end)
{ char filename[PATH_MAX]; int i, err = -1; constchar *name;
u64 addr = 0;
/* Fragment of a different function starts here — TODO confirm. */
if (!err) {
pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
kmap->name, map__start(map), map__end(map));
}
map__put(map);
return err;
}
/*
 * Scan @dso's symbols for the x86_64 syscall entry trampoline.
 * Returns the start address of the first matching STB_GLOBAL symbol,
 * or 0 when none is present.
 */
static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	static const char * const syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym;

	for (sym = dso__first_symbol(dso); sym; sym = dso__next_symbol(sym)) {
		unsigned int i;

		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (strcmp(sym->name, syms[i]) == 0)
				return sym->start;
		}
	}

	return 0;
}
/*
 * NOTE(review): headless fragment. The macros below belong to the x86_64
 * entry-trampoline support, but the enclosing function (presumably
 * machine__map_x86_64_entry_trampolines()) lost its signature, local
 * declarations (args, pgoff, nr_cpus_avail, cpu, kernel) and its tail in
 * extraction. Restore from the upstream file before building.
 */
/* * These values can be used for kernels that do not have symbols for the entry * trampolines in kallsyms.
*/ #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000 #define X86_64_ENTRY_TRAMPOLINE 0x6000
/* * In the vmlinux case, pgoff is a virtual address which must now be * mapped to a vmlinux offset.
*/
maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
if (args.found || machine->trampolines_mapped) return 0;
pgoff = find_entry_trampoline(kernel); if (!pgoff) return 0;
nr_cpus_avail = machine__nr_cpus_avail(machine);
/* Add a 1 page map for each CPU's entry trampoline */ for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
cpu * X86_64_CPU_ENTRY_AREA_SIZE +
X86_64_ENTRY_TRAMPOLINE; struct extra_kernel_map xm = {
.start = va,
.end = va + page_size,
.pgoff = pgoff,
};
staticint
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{ /* In case of renewal the kernel map, destroy previous one */
machine__destroy_kernel_maps(machine);
/*
 * Create kernel maps for the default guest (when guest symbol options are
 * set) and for every guest found under symbol_conf.guestmount, where each
 * guest is a directory named after its pid.
 *
 * Returns 0 on success, -ENOENT when the guestmount cannot be scanned, or
 * the last access() failure.
 *
 * Fixes vs the garbled original: errno is reset before strtol() so the
 * ERANGE check cannot act on a stale value; every scandir() entry is freed,
 * not just the array; the missing final return/closing brace is restored.
 */
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			/* strtol() only sets errno on failure; clear stale values. */
			errno = 0;
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		/* scandir() allocates each entry as well as the array. */
		for (i = 0; i < items; i++)
			free(namelist[i]);
		free(namelist);
	}

	return ret;
}
/*
 * Load kernel symbols for this machine's kernel map from @filename
 * (typically /proc/kallsyms). Returns the number of symbols loaded,
 * or <= 0 on failure.
 */
int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *kmap = machine__kernel_map(machine);
	struct dso *kdso = map__dso(kmap);
	int nr_syms = __dso__load_kallsyms(kdso, filename, kmap, true);

	if (nr_syms <= 0)
		return nr_syms;

	dso__set_loaded(kdso);
	/*
	 * Since /proc/kallsyms will have multiple sessions for the
	 * kernel, with modules between them, fixup the end of all
	 * sections.
	 */
	maps__fixup_end(machine__kernel_maps(machine));

	return nr_syms;
}
/*
 * Load kernel symbols for this machine's kernel map by searching the
 * vmlinux path. Returns the number of symbols loaded, or <= 0 on failure.
 *
 * Fix: the garbled original had a fragment of an unrelated kmod-compression
 * block fused in (referencing an undeclared 'm'), dropped a reference on a
 * map it did not own, and unconditionally returned 0 instead of the load
 * result.
 */
int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	struct dso *dso = map__dso(map);
	int ret = dso__load_vmlinux_path(dso, map);

	if (ret > 0)
		dso__set_loaded(dso);

	return ret;
}
/*
 * Recursively walk @path (a modules directory tree) and register the path
 * of every kernel module (*.ko) found with its map via
 * maps__set_module_path(). @path is a scratch buffer of @path_size bytes
 * that is extended in place while recursing. Returns 0 on success, <0 on
 * error.
 *
 * Fixes vs the garbled original: the bounds-check early return leaked the
 * open directory fd (now routed through 'out'); the truncated tail
 * (zfree of the parsed kmod name, loop close, fd close, return) is
 * restored.
 */
static int maps__set_modules_path_dir(struct maps *maps, char *path, size_t path_size, int depth)
{
	struct io_dirent64 *dent;
	struct io_dir iod;
	size_t root_len = strlen(path);
	int ret = 0;

	io_dir__init(&iod, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (iod.dirfd < 0) {
		pr_debug("%s: cannot open %s dir\n", __func__, path);
		return -1;
	}
	/* Bounds check, should never happen. */
	if (root_len >= path_size) {
		ret = -1;
		goto out;	/* was an early return that leaked iod.dirfd */
	}
	path[root_len++] = '/';
	while ((dent = io_dir__readdir(&iod)) != NULL) {
		if (io_dir__is_dir(&iod, dent)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			/* Bounds check, should never happen. */
			if (root_len + strlen(dent->d_name) >= path_size)
				continue;

			strcpy(path + root_len, dent->d_name);
			ret = maps__set_modules_path_dir(maps, path, path_size, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod) {
				/* Bounds check, should never happen. */
				if (root_len + strlen(dent->d_name) < path_size) {
					strcpy(path + root_len, dent->d_name);
					ret = maps__set_module_path(maps, path, &m);
				}
			}
			zfree(&m.name);

			if (ret)
				goto out;
		}
	}
out:
	close(iod.dirfd);
	return ret;
}
/*
 * Record the [start, end) range of the kernel's vmlinux map.
 */
static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	map__set_start(machine->vmlinux_map, start);
	map__set_end(machine->vmlinux_map, end);

	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (!start && !end)
		map__set_end(machine->vmlinux_map, ~0ULL);
}
/*
 * Build the kernel maps for @machine: the vmlinux map, module maps, the
 * ref-reloc symbol from the running kernel (when available) and any extra
 * kernel maps. Returns 0 on success, negative on failure.
 *
 * Fix: the garbled original was truncated before the 'out_put' label, so
 * the goto targets were undefined and the reference taken by
 * machine__get_kernel() was never dropped — the tail is restored.
 */
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * we have a real start address now, so re-order the kmaps
		 * assume it's the last in the kmaps
		 */
		ret = machine__update_kernel_mmap(machine, start, end);
		if (ret < 0)
			goto out_put;
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
							 machine__kernel_map(machine));

		if (next) {
			machine__set_kernel_mmap(machine, start, map__start(next));
			map__put(next);
		}
	}

out_put:
	dso__put(kernel);
	return ret;
}
/*
 * NOTE(review): headless fragment of machine__process_kernel_mmap_event()
 * (the signature, the dso_space/is_kernel_mmap/mmap_name declarations and
 * the tail with out_problem were lost in extraction). 'xm', 'bid',
 * 'mmap_name', 'dso_space' and the 'out_problem' label are used but not
 * declared in what remains — restore from the upstream file.
 */
/* If we have maps from kcore then we do not need or want any others */ if (machine__uses_kcore(machine)) return 0;
if (machine__is_host(machine))
dso_space = DSO_SPACE__KERNEL; else
dso_space = DSO_SPACE__KERNEL_GUEST;
is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; if (!is_kernel_mmap && !machine__is_host(machine)) { /* * If the event was recorded inside the guest and injected into * the host perf.data file, then it will match a host mmap_name, * so try that - see machine__set_mmap_name().
*/
mmap_name = "[kernel.kallsyms]";
is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
} if (xm->name[0] == '/' ||
(!is_kernel_mmap && xm->name[0] == '[')) { struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
if (build_id__is_defined(bid))
dso__set_build_id(map__dso(map), bid);
map__put(map);
} elseif (is_kernel_mmap) { constchar *symbol_name = xm->name + strlen(mmap_name); /* * Should be there already, from the build-id table in * the header.
*/ struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);
if (kernel == NULL)
kernel = machine__findnew_dso(machine, machine->mmap_name); if (kernel == NULL) goto out_problem;
if (build_id__is_defined(bid))
dso__set_build_id(kernel, bid);
/* * Avoid using a zero address (kptr_restrict) for the ref reloc * symbol. Effectively having zero here means that at record * time /proc/sys/kernel/kptr_restrict was non zero.
*/ if (xm->pgoff != 0) {
map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
symbol_name,
xm->pgoff);
}
/*
 * NOTE(review): headless fragment of machine__process_fork_event() (the
 * signature and the declarations of 'thread', 'parent' and
 * 'do_maps_clone', plus the tail that clones the maps and returns, were
 * lost in extraction). Restore from the upstream file.
 */
if (dump_trace)
perf_event__fprintf_task(event, stdout);
/* * There may be an existing thread that is not actually the parent, * either because we are processing events out of order, or because the * (fork) event that would have removed the thread was lost. Assume the * latter case and continue on as best we can.
*/ if (thread__pid(parent) != (pid_t)event->fork.ppid) {
dump_printf("removing erroneous parent thread %d/%d\n",
thread__pid(parent), thread__tid(parent));
machine__remove_thread(machine, parent);
thread__put(parent);
parent = machine__findnew_thread(machine, event->fork.ppid,
event->fork.ptid);
}
/* if a thread currently exists for the thread id remove it */ if (thread != NULL) {
machine__remove_thread(machine, thread);
thread__put(thread);
}
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid); /* * When synthesizing FORK events, we are trying to create thread * objects for the already running tasks on the machine. * * Normally, for a kernel FORK event, we want to clone the parent's * maps because that is what the kernel just did. * * But when synthesizing, this should not be done. If we do, we end up * with overlapping maps as we process the synthesized MMAP2 events that * get delivered shortly thereafter. * * Use the FORK event misc flags in an internal way to signal this * situation, so we can elide the map clone when appropriate.
*/ if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
do_maps_clone = false;
/*
 * Handle a PERF_RECORD_EXIT event: the thread is either marked as exited
 * (when callers asked to keep exited threads around) or removed from the
 * machine entirely. Always returns 0.
 */
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *exiting = machine__find_thread(machine,
						      event->fork.pid,
						      event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/* There is no context switch out before exit, so we decrement here. */
	machine->parallelism--;

	if (exiting) {
		if (symbol_conf.keep_exited_threads)
			thread__set_exited(exiting, /*exited=*/true);
		else
			machine__remove_thread(machine, exiting);
		thread__put(exiting);
	}

	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event, struct perf_sample *sample)
{ int ret;
switch (event->header.type) { case PERF_RECORD_COMM:
ret = machine__process_comm_event(machine, event, sample); break; case PERF_RECORD_MMAP:
ret = machine__process_mmap_event(machine, event, sample); break; case PERF_RECORD_NAMESPACES:
ret = machine__process_namespaces_event(machine, event, sample); break; case PERF_RECORD_CGROUP:
ret = machine__process_cgroup_event(machine, event, sample); break; case PERF_RECORD_MMAP2:
ret = machine__process_mmap2_event(machine, event, sample); break; case PERF_RECORD_FORK:
ret = machine__process_fork_event(machine, event, sample); break; case PERF_RECORD_EXIT:
ret = machine__process_exit_event(machine, event, sample); break; case PERF_RECORD_LOST:
ret = machine__process_lost_event(machine, event, sample); break; case PERF_RECORD_AUX:
ret = machine__process_aux_event(machine, event); break; case PERF_RECORD_ITRACE_START:
ret = machine__process_itrace_start_event(machine, event); break; case PERF_RECORD_LOST_SAMPLES:
ret = machine__process_lost_samples_event(machine, event, sample); break; case PERF_RECORD_SWITCH: case PERF_RECORD_SWITCH_CPU_WIDE:
ret = machine__process_switch_event(machine, event); break; case PERF_RECORD_KSYMBOL:
ret = machine__process_ksymbol(machine, event, sample); break; case PERF_RECORD_BPF_EVENT:
ret = machine__process_bpf(machine, event, sample); break; case PERF_RECORD_TEXT_POKE:
ret = machine__process_text_poke(machine, event, sample); break; case PERF_RECORD_AUX_OUTPUT_HW_ID:
ret = machine__process_aux_output_hw_id_event(machine, event); break; default:
ret = -1; break;
}
/*
 * NOTE(review): headless two-statement fragment, apparently from an
 * address-resolution helper (ip__resolve_ams() or similar) — 'al',
 * 'thread' and 'ip' are not declared here. Restore the enclosing function
 * from the upstream file.
 */
addr_location__init(&al); /* * We cannot use the header.misc hint to determine whether a * branch stack address is user, kernel, guest, hypervisor. * Branches may straddle the kernel/user/hypervisor boundaries. * Thus, we have to try consecutively until we find a match * or else, the symbol is unknown
*/
thread__find_cpumode_addr_location(thread, ip, /*symbols=*/true, &al);
/* Remove loops. */ staticint remove_loops(struct branch_entry *l, int nr, struct iterations *iter)
{ int i, j, off; unsignedchar chash[CHASHSZ];
memset(chash, NO_ENTRY, sizeof(chash));
BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
for (i = 0; i < nr; i++) { int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
/* no collision handling for now */ if (chash[h] == NO_ENTRY) {
chash[h] = i;
} elseif (l[chash[h]].from == l[i].from) { bool is_loop = true; /* check if it is a real loop */
off = 0; for (j = chash[h]; j < i && i + off < nr; j++, off++) if (l[j].from != l[i + off].from) {
is_loop = false; break;
} if (is_loop) {
j = nr - (i + off); if (j > 0) {
save_iterations(iter + i + off,
l + i, off);
memmove(iter + i, iter + i + off,
j * sizeof(*iter));
/*
 * NOTE(review): headless fragment of lbr_callchain_add_lbr_ip() — the
 * signature and the declarations of 'cursor', 'entries', 'lbr_nr', 'ip',
 * 'flags', 'err', 'i', 'callee', 'cpumode', 'parent', 'root_al',
 * 'branch_from' and 'symbols' were lost in extraction. The callee and
 * non-callee orderings below both appear, but the surrounding control flow
 * that selects between them is missing. Restore from the upstream file.
 */
/* * The curr and pos are not used in writing session. They are cleared * in callchain_cursor_commit() when the writing session is closed. * Using curr and pos to track the current cursor node.
*/ if (thread__lbr_stitch(thread)) {
cursor->curr = NULL;
cursor->pos = cursor->nr; if (cursor->nr) {
cursor->curr = cursor->first; for (i = 0; i < (int)(cursor->nr - 1); i++)
cursor->curr = cursor->curr->next;
}
}
if (callee) { /* Add LBR ip from first entries.to */
ip = entries[0].to;
flags = &entries[0].flags;
*branch_from = entries[0].from;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip, true, flags, NULL,
*branch_from, symbols); if (err) return err;
/* * The number of cursor node increases. * Move the current cursor node. * But does not need to save current cursor node for entry 0. * It's impossible to stitch the whole LBRs of previous sample.
*/ if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) { if (!cursor->curr)
cursor->curr = cursor->first; else
cursor->curr = cursor->curr->next;
cursor->pos++;
}
/* Add LBR ip from entries.from one by one. */ for (i = 0; i < lbr_nr; i++) {
ip = entries[i].from;
flags = &entries[i].flags;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip, true, flags, NULL,
*branch_from, symbols); if (err) return err;
save_lbr_cursor_node(thread, cursor, i);
} return 0;
}
/* Add LBR ip from entries.from one by one. */ for (i = lbr_nr - 1; i >= 0; i--) {
ip = entries[i].from;
flags = &entries[i].flags;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip, true, flags, NULL,
*branch_from, symbols); if (err) return err;
save_lbr_cursor_node(thread, cursor, i);
}
if (lbr_nr > 0) { /* Add LBR ip from first entries.to */
ip = entries[0].to;
flags = &entries[0].flags;
*branch_from = entries[0].from;
err = add_callchain_ip(thread, cursor, parent,
root_al, &cpumode, ip, true, flags, NULL,
*branch_from, symbols); if (err) return err;
}
/*
 * NOTE(review): headless fragment of the LBR stitching check (presumably
 * has_stitched_lbr()) — 'cur_stack', 'prev_stack', 'cur_entries',
 * 'prev_entries', 'max_lbr', 'nr_identical_branches', 'lbr_stitch',
 * 'stitch_node' and 'thread' are used but not declared here, and the tail
 * after get_stitch_node() is missing. Restore from the upstream file.
 */
/* Find the physical index of the base-of-stack for current sample. */
cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
/* hw_idx wraps around max_lbr, so the distance may need un-wrapping. */
distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
(max_lbr + prev_stack->hw_idx - cur_base); /* Previous sample has shorter stack. Nothing can be stitched. */ if (distance + 1 > prev_stack->nr) returnfalse;
/* * Check if there are identical LBRs between two samples. * Identical LBRs must have same from, to and flags values. Also, * they have to be saved in the same LBR registers (same physical * index). * * Starts from the base-of-stack of current sample.
*/ for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) { if ((prev_entries[i].from != cur_entries[j].from) ||
(prev_entries[i].to != cur_entries[j].to) ||
(prev_entries[i].flags.value != cur_entries[j].flags.value)) break;
nr_identical_branches++;
}
if (!nr_identical_branches) returnfalse;
/* * Save the LBRs between the base-of-stack of previous sample * and the base-of-stack of current sample into lbr_stitch->lists. * These LBRs will be stitched later.
*/ for (i = prev_stack->nr - 1; i > (int)distance; i--) {
if (!lbr_stitch->prev_lbr_cursor[i].valid) continue;
stitch_node = get_stitch_node(thread); if (!stitch_node) returnfalse;
The information on this page was carefully compiled to the best of our knowledge.
However, no guarantee is given as to the completeness, correctness,
or quality of the information provided.
Note:
The colored syntax rendering and the measurement are still experimental.