#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
/*
 * Default symbol/name matcher for syscall metadata lookup: compare the
 * symbol against the expected "sys_<name>" while tolerating arch wrapper
 * prefixes. Returns true when the two refer to the same syscall.
 */
static inline bool
arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscalls symbols aliases prefixed
	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
	 * mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel, do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 *     *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
/*
 * Return the syscall number for @task/@regs, or -1 when the syscall is a
 * compat (32-bit on 64-bit) call that this arch asks tracing to ignore.
 * NOTE(review): the original chunk had a stray fragment of another
 * function spliced into this body and no fallthrough return; restored to
 * the upstream definition — confirm against the canonical source tree.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
/* No compat filtering needed: report the raw syscall number. */
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
/*
 * Define the trace fields of a syscall-enter event: one field per syscall
 * argument, each sizeof(unsigned long), laid out after the common trace
 * entry header (starting at offsetof(struct syscall_trace_enter, args)).
 * Returns 0 on success or the first trace_define_field() error.
 * NOTE(review): the original chunk lost the trailing "return ret;" and
 * closing brace to extraction garbling; restored here.
 */
static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int offset = offsetof(typeof(trace), args);
	int ret = 0;
	int i;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		if (ret)
			break;
		offset += sizeof(unsigned long);
	}

	return ret;
}
/* * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled.
*/
might_fault();
guard(preempt_notrace)();
/* * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled.
*/
might_fault();
guard(preempt_notrace)();
/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. ¶m) */
perf_fetch_caller_regs(regs);
*(struct pt_regs **)¶m = regs;
param.syscall_nr = rec->nr; for (i = 0; i < sys_data->nb_args; i++)
param.args[i] = rec->args[i]; return trace_call_bpf(call, ¶m);
}
staticvoid perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{ struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; struct pt_regs *fake_regs; struct hlist_head *head; unsignedlong args[6]; bool valid_prog_array; int syscall_nr; int rctx; int size;
/* * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled.
*/
might_fault();
guard(preempt_notrace)();
syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return;
sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return;
head = this_cpu_ptr(sys_data->enter_event->perf_events);
valid_prog_array = bpf_prog_array_valid(sys_data->enter_event); if (!valid_prog_array && hlist_empty(head)) return;
/* get the size after alignment with the u32 buffer size field */
size = sizeof(unsignedlong) * sys_data->nb_args + sizeof(*rec);
size = ALIGN(size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
rec = perf_trace_buf_alloc(size, &fake_regs, &rctx); if (!rec) return;
/* bpf prog requires 'regs' to be the first member in the ctx (a.k.a. ¶m) */
perf_fetch_caller_regs(regs);
*(struct pt_regs **)¶m = regs;
param.syscall_nr = rec->nr;
param.ret = rec->ret; return trace_call_bpf(call, ¶m);
}
staticvoid perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{ struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; struct pt_regs *fake_regs; struct hlist_head *head; bool valid_prog_array; int syscall_nr; int rctx; int size;
/* * Syscall probe called with preemption enabled, but the ring * buffer and per-cpu data require preemption to be disabled.
*/
might_fault();
guard(preempt_notrace)();
syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return;
sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return;
head = this_cpu_ptr(sys_data->exit_event->perf_events);
valid_prog_array = bpf_prog_array_valid(sys_data->exit_event); if (!valid_prog_array && hlist_empty(head)) return;
/* We can probably do that at build time */
size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
rec = perf_trace_buf_alloc(size, &fake_regs, &rctx); if (!rec) return;
switch (type) { case TRACE_REG_REGISTER: return reg_event_syscall_enter(file, event); case TRACE_REG_UNREGISTER:
unreg_event_syscall_enter(file, event); return 0;
#ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: return perf_sysenter_enable(event); case TRACE_REG_PERF_UNREGISTER:
perf_sysenter_disable(event); return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: case TRACE_REG_PERF_DEL: return 0; #endif
} return 0;
}
switch (type) { case TRACE_REG_REGISTER: return reg_event_syscall_exit(file, event); case TRACE_REG_UNREGISTER:
unreg_event_syscall_exit(file, event); return 0;
#ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: return perf_sysexit_enable(event); case TRACE_REG_PERF_UNREGISTER:
perf_sysexit_disable(event); return 0; case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: case TRACE_REG_PERF_DEL: return 0; #endif
} return 0;
}
/*
 * NOTE(review): the German text below is a website disclaimer that leaked
 * into this file during extraction; it is not part of the source. Kept
 * here (translated) as a comment so it no longer breaks compilation:
 * "The information on this website was carefully compiled to the best of
 * our knowledge. However, neither completeness, correctness, nor quality
 * of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */