/*
 * NOTE(review): fragment — the enclosing function's signature is not
 * visible in this chunk and the formatting is mangled (several
 * statements per line).  Code kept byte-identical; only comments added.
 * Presumably the allocated tuser is released by a scope-based __free()
 * cleanup on the early-return paths — TODO confirm, otherwise the
 * "return NULL" after a failed kstrdup() would leak tuser.
 */
tuser = kzalloc(sizeof(*tuser), GFP_KERNEL); if (!tuser) return NULL;
/* Own a private copy of the tracepoint name. */
tuser->name = kstrdup(name, GFP_KERNEL); if (!tuser->name) return NULL;
/* Register tracepoint if it is loaded. */ if (tpoint) {
tuser->tpoint = tpoint;
ret = tracepoint_user_register(tuser); if (ret) return ERR_PTR(ret);
}
/*
 * NOTE(review): this function looks truncated here — no "return tuser;"
 * or closing brace is visible before the DEFINE_FREE() below.  Code is
 * kept byte-identical; only comments are added.
 */
/* * Get tracepoint_user if exist, or allocate new one and register it. * If tracepoint is on a module, get its refcounter too. * This returns errno or NULL (not loaded yet) or tracepoint_user.
*/ staticstruct tracepoint_user *tracepoint_user_find_get(constchar *name, struct module **pmod)
{ struct module *mod __free(module_put) = NULL; struct tracepoint_user *tuser; struct tracepoint *tpoint;
if (!name || !pmod) return ERR_PTR(-EINVAL);
/* Get and lock the module which has tracepoint. */
tpoint = find_tracepoint(name, &mod);
/* The corresponding tracepoint_user is not found. */
tuser = __tracepoint_user_init(name, tpoint); if (!IS_ERR_OR_NULL(tuser))
/* Hand the module reference over to the caller on success. */
*pmod = no_free_ptr(mod);
/* Scope-based cleanup helper: drop a tracepoint_user reference unless
 * the pointer is NULL or an error pointer. */
DEFINE_FREE(tuser_put, struct tracepoint_user *, if (!IS_ERR_OR_NULL(_T))
tracepoint_user_put(_T))
/*
 * NOTE(review): formatting in this region is mangled (the struct body
 * and the macro are collapsed onto single lines).  Code kept
 * byte-identical; only comments added.
 */
/* * Fprobe event core functions
*/
/* * @tprobe is true for tracepoint probe. * @tuser can be NULL if the trace_fprobe is disabled or the tracepoint is not * loaded with a module. If @tuser != NULL, this trace_fprobe is enabled.
*/ struct trace_fprobe { struct dyn_event devent; struct fprobe fp; constchar *symbol; bool tprobe; struct tracepoint_user *tuser; struct trace_probe tp;
};
/* Iteration macro: walks all dyn_events and yields only trace_fprobes. */
/** * for_each_trace_fprobe - iterate over the trace_fprobe list * @pos: the struct trace_fprobe * for each entry * @dpos: the struct dyn_event * to use as a loop cursor
*/ #define for_each_trace_fprobe(pos, dpos) \
for_each_dyn_event(dpos) \ if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))
/*
 * NOTE(review): process_fetch_insn() appears truncated here — the code
 * after the first-stage switch (dereference/store stages) is missing
 * from this chunk.  Code kept byte-identical; only comments added.
 */
/* * Note that we don't verify the fetch_insn code, since it does not come * from user space.
*/ staticint
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata, void *dest, void *base)
{ struct ftrace_regs *fregs = rec; unsignedlong val; int ret;
retry: /* 1st stage: get value from context */ switch (code->op) { case FETCH_OP_STACK:
val = ftrace_regs_get_kernel_stack_nth(fregs, code->param); break; case FETCH_OP_STACKP:
val = ftrace_regs_get_stack_pointer(fregs); break; case FETCH_OP_RETVAL:
val = ftrace_regs_get_return_value(fregs); break; #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API case FETCH_OP_ARG:
val = ftrace_regs_get_argument(fregs, code->param); break; case FETCH_OP_EDATA:
/* Entry data is stored relative to the edata base pointer. */
val = *(unsignedlong *)((unsignedlong)edata + code->offset); break; #endif case FETCH_NOP_SYMBOL: /* Ignore a place holder */
code++; goto retry; default:
ret = process_common_fetch_insn(code, &val); if (ret < 0) return ret;
}
code++;
/*
 * Release a trace_fprobe and everything it owns: the embedded
 * trace_probe, the tracepoint_user reference (if any) and the
 * duplicated symbol string.  Safe to call with a NULL pointer.
 */
static void free_trace_fprobe(struct trace_fprobe *tf)
{
	if (!tf)
		return;

	trace_probe_cleanup(&tf->tp);
	if (tf->tuser)
		tracepoint_user_put(tf->tuser);
	kfree(tf->symbol);
	kfree(tf);
}
/* Since alloc_trace_fprobe() can return error, check the pointer is ERR too. */
DEFINE_FREE(free_trace_fprobe, struct trace_fprobe *, if (!IS_ERR_OR_NULL(_T)) free_trace_fprobe(_T))
/* * Allocate new trace_probe and initialize it (including fprobe).
*/ staticstruct trace_fprobe *alloc_trace_fprobe(constchar *group, constchar *event, constchar *symbol, int nargs, bool is_return, bool is_tracepoint)
{ struct trace_fprobe *tf __free(free_trace_fprobe) = NULL; int ret = -ENOMEM;
/* Allocate with space for nargs trailing probe-argument slots. */
tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL); if (!tf) return ERR_PTR(ret);
tf->symbol = kstrdup(symbol, GFP_KERNEL); if (!tf->symbol) return ERR_PTR(-ENOMEM);
/* Select entry or exit dispatcher depending on probe direction. */
if (is_return)
tf->fp.exit_handler = fexit_dispatcher; else
tf->fp.entry_handler = fentry_dispatcher;
tf->tprobe = is_tracepoint;
ret = trace_probe_init(&tf->tp, event, group, false, nargs); if (ret < 0) return ERR_PTR(ret);
/*
 * NOTE(review): alloc_trace_fprobe() appears truncated above (no
 * pointer-returning tail visible).  The lines below — which return int
 * and use tuser/mod/ip not declared in this scope — seem to belong to
 * a different (tracepoint registration) helper whose signature is
 * missing from this chunk.
 */
/* If the tracepoint is in a module, it must be locked in this function. */
tuser = tracepoint_user_find_get(tf->symbol, &mod); /* This tracepoint is not loaded yet */ if (IS_ERR(tuser)) return PTR_ERR(tuser); if (!tuser) return -ENOMEM;
/* Register fprobe only if the tracepoint is loaded. */ if (tuser->tpoint) {
ip = tracepoint_user_ip(tuser); if (WARN_ON_ONCE(!ip)) return -ENOENT;
ret = register_fprobe_ips(&tf->fp, &ip, 1); if (ret < 0) return ret;
}
/* Transfer ownership of the tracepoint_user reference to tf. */
tf->tuser = no_free_ptr(tuser); return 0;
}
/*
 * Check that the probe target is resolvable: returns 0 when available,
 * or a negative errno when the target function cannot be found.
 * Tracepoint probes are always considered available (they have a stub).
 */
static int trace_fprobe_verify_target(struct trace_fprobe *tf)
{
	int nr_ips;

	/* Tracepoint should have a stub function. */
	if (trace_fprobe_is_tracepoint(tf))
		return 0;

	/*
	 * Note: since we don't lock the module, even if this succeeded,
	 * register_fprobe() later can fail.
	 */
	nr_ips = fprobe_count_ips_from_filter(tf->symbol, NULL);
	if (nr_ips < 0)
		return nr_ips;

	return 0;
}
/*
 * Internal register function - just handle fprobe and flags.
 * Returns 0 on success or a negative errno.
 */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
	int idx, err;

	/* Should we need new LOCKDOWN flag for fprobe? */
	err = security_locked_down(LOCKDOWN_KPROBES);
	if (err)
		return err;

	if (trace_fprobe_is_registered(tf))
		return -EINVAL;

	/* Re-resolve every fetch argument before arming the probe. */
	for (idx = 0; idx < tf->tp.nr_args; idx++) {
		err = traceprobe_update_arg(&tf->tp.args[idx]);
		if (err)
			return err;
	}

	tf->fp.flags &= ~FPROBE_FL_DISABLED;

	/* NOTE(review): "__regsiter" is presumably the helper's real
	 * (misspelled) name defined elsewhere in this file — kept as-is
	 * so the reference still resolves. */
	if (trace_fprobe_is_tracepoint(tf))
		return __regsiter_tracepoint_fprobe(tf);

	/* TODO: handle filter, nofilter or symbol list */
	return register_fprobe(&tf->fp, tf->symbol, NULL);
}
/*
 * Internal unregister function - just handle fprobe and flags.
 * Detaches the fprobe if it is armed and drops the tracepoint_user
 * reference held by this probe, if any.
 */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
	struct tracepoint_user *tuser = tf->tuser;

	if (trace_fprobe_is_registered(tf))
		unregister_fprobe(&tf->fp);

	if (tuser) {
		tf->tuser = NULL;
		tracepoint_user_put(tuser);
	}
}
/*
 * NOTE(review): unregister_trace_fprobe() is truncated — its "unreg:"
 * label and tail are missing from this chunk, and the lines from the
 * list_for_each_entry() onward use orig/comp/tpe/i, which are not
 * declared here; they appear to be the tail of a separate "has same
 * probe" comparator.  Code kept byte-identical; only comments added.
 */
/* TODO: make this trace_*probe common function */ /* Unregister a trace_probe and probe_event */ staticint unregister_trace_fprobe(struct trace_fprobe *tf)
{ /* If other probes are on the event, just unregister fprobe */ if (trace_probe_has_sibling(&tf->tp)) goto unreg;
/* Enabled event can not be unregistered */ if (trace_probe_is_enabled(&tf->tp)) return -EBUSY;
/* If there's a reference to the dynamic event */ if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp))) return -EBUSY;
/* Will fail if probe is being used by ftrace or perf */ if (unregister_fprobe_event(tf)) return -EBUSY;
/* NOTE(review): fragment of a duplicate-probe comparator begins here. */
list_for_each_entry(orig, &tpe->probes, tp.list) { if (strcmp(trace_fprobe_symbol(orig),
trace_fprobe_symbol(comp))) continue;
/* * trace_probe_compare_arg_type() ensured that nr_args and * each argument name and type are same. Let's compare comm.
*/ for (i = 0; i < orig->tp.nr_args; i++) { if (strcmp(orig->tp.args[i].comm,
comp->tp.args[i].comm)) break;
}
if (i == orig->tp.nr_args) returntrue;
}
returnfalse;
}
/*
 * Append @tf to the existing event @to.
 *
 * Fails with -EEXIST (after logging the reason) when the two probes
 * differ in type (entry vs. return, fprobe vs. tprobe), differ in
 * argument types, or are exact duplicates.  On success @tf is linked
 * into @to's event and added to the dyn_event list; if the target
 * cannot be verified afterwards, the append is undone and the error
 * returned.
 */
static int append_trace_fprobe_event(struct trace_fprobe *tf, struct trace_fprobe *to)
{
	int err;

	if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
	    trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
		/* Mixing entry/exit or fprobe/tprobe on one event is not allowed. */
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, DIFF_PROBE_TYPE);
		return -EEXIST;
	}

	err = trace_probe_compare_arg_type(&tf->tp, &to->tp);
	if (err) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(err + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}

	if (trace_fprobe_has_same_fprobe(to, tf)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	err = trace_probe_append(&tf->tp, &to->tp);
	if (err)
		return err;

	err = trace_fprobe_verify_target(tf);
	if (err) {
		/* Target is unavailable: undo the append. */
		trace_probe_unlink(&tf->tp);
		return err;
	}

	dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));
	return 0;
}
/*
 * NOTE(review): truncated — the final "return ret;" and closing brace
 * of this function are not visible in this chunk.  Code kept
 * byte-identical; only comments added.
 */
/* Register a trace_probe and probe_event, and check the fprobe is available. */ staticint register_trace_fprobe_event(struct trace_fprobe *tf)
{ struct trace_fprobe *old_tf; int ret;
/* Serialize against other dynamic-event updates. */
guard(mutex)(&event_mutex);
/* If an event with the same name/group already exists, append to it. */
old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
trace_probe_group_name(&tf->tp)); if (old_tf) return append_trace_fprobe_event(tf, old_tf);
/* Register new event */
ret = register_fprobe_event(tf); if (ret) { if (ret == -EEXIST) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret); return ret;
}
/* Verify fprobe is sane. */
ret = trace_fprobe_verify_target(tf); if (ret < 0)
unregister_fprobe_event(tf); else
dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));
/*
 * NOTE(review): the lines below mix fragments of several definitions.
 * The first few appear to be the interior of a tracepoint-lookup
 * callback (operating on a __find_tracepoint_cb_data) whose signature
 * is not visible.  Code kept byte-identical; only comments added.
 */
if (!data->tpoint && !strcmp(data->tp_name, tp->name)) { /* If module is not specified, try getting module refcount. */ if (!data->mod && mod) { /* If failed to get refcount, ignore this tracepoint. */ if (!try_module_get(mod)) return;
if (!data->tpoint && !strcmp(data->tp_name, tp->name))
data->tpoint = tp;
}
/* * Find a tracepoint from kernel and module. If the tracepoint is on the module, * the module's refcount is incremented and returned as *@tp_mod. Thus, if it is * not NULL, caller must call module_put(*tp_mod) after used the tracepoint.
*/ staticstruct tracepoint *find_tracepoint(constchar *tp_name, struct module **tp_mod)
{ struct __find_tracepoint_cb_data data = {
.tp_name = tp_name,
.mod = NULL,
};
/* NOTE(review): find_tracepoint() is truncated here — the tracepoint
 * iteration and return statement are missing from this chunk. */
#ifdef CONFIG_MODULES /* * Find a tracepoint from specified module. In this case, this does not get the * module's refcount. The caller must ensure the module is not freed.
*/ staticstruct tracepoint *find_tracepoint_in_module(struct module *mod, constchar *tp_name)
{ struct __find_tracepoint_cb_data data = {
.tp_name = tp_name,
.mod = mod,
};
/* NOTE(review): find_tracepoint_in_module() is also truncated here. */
/*
 * NOTE(review): interior of a module notifier callback (its signature
 * is not visible) that re-binds tracepoint_user entries when a module
 * is loaded and unregisters them when the module is removed.  Code
 * kept byte-identical; only comments added.
 */
if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING) return NOTIFY_DONE;
mutex_lock(&tracepoint_user_mutex);
for_each_tracepoint_user(tuser) { if (val == MODULE_STATE_COMING) { /* This is not a tracepoint in this module. Skip it. */
tpoint = find_tracepoint_in_module(tp_mod->mod, tuser->name); if (!tpoint) continue;
/* The tracepoint appeared with the module: re-register it. */
WARN_ON_ONCE(tracepoint_user_register_again(tuser, tpoint));
} elseif (val == MODULE_STATE_GOING &&
tracepoint_user_within_module(tuser, tp_mod->mod)) { /* Unregister all tracepoint_user in this module. */
tracepoint_user_unregister_clear(tuser);
}
}
mutex_unlock(&tracepoint_user_mutex);
/*
 * NOTE(review): a second notifier callback (presumably
 * __tprobe_event_module_cb, per the notifier_block below) begins here
 * mid-body; its signature is not visible in this chunk.
 */
if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING) return NOTIFY_DONE;
mutex_lock(&event_mutex);
for_each_trace_fprobe(tf, pos) { /* Skip fprobe and disabled tprobe events. */ if (!trace_fprobe_is_tracepoint(tf) || !tf->tuser) continue;
/* Before this notification, tracepoint notifier has already done. */ if (val == MODULE_STATE_COMING &&
tracepoint_user_within_module(tf->tuser, mod)) { unsignedlong ip = tracepoint_user_ip(tf->tuser);
/* Arm the fprobe at the tracepoint address now that it exists. */
WARN_ON_ONCE(register_fprobe_ips(&tf->fp, &ip, 1));
} elseif (val == MODULE_STATE_GOING && /* * tracepoint_user_within_module() does not work here because * tracepoint_user is already unregistered and cleared tpoint. * Instead, checking whether the fprobe is registered but * tpoint is cleared(unregistered). Such unbalance probes * must be adjusted anyway.
*/
trace_fprobe_is_registered(tf) &&
!tf->tuser->tpoint) {
unregister_fprobe(&tf->fp);
}
}
mutex_unlock(&event_mutex);
return NOTIFY_DONE;
}
/* NOTE: this must be called after tracepoint callback */ staticstruct notifier_block tprobe_event_module_nb = {
.notifier_call = __tprobe_event_module_cb, /* Make sure this is later than tracepoint module notifier. */
.priority = -10,
}; #endif/* CONFIG_MODULES */
/*
 * NOTE(review): tail of a symbol/tracepoint-name parsing helper
 * (presumably parse_symbol_and_return, given *symbol and *is_return);
 * its signature is above this chunk's visible start.  Code kept
 * byte-identical; only comments added.
 */
if (is_tracepoint) {
/* Tracepoint names may only contain [A-Za-z0-9_]. */
tmp = *symbol; while (*tmp && (isalnum(*tmp) || *tmp == '_'))
tmp++; if (*tmp) { /* find a wrong character. */
trace_probe_log_err(tmp - *symbol, BAD_TP_NAME);
kfree(*symbol);
*symbol = NULL; return -EINVAL;
}
}
/* If there is $retval, this should be a return fprobe. */ for (i = 2; i < argc; i++) {
tmp = strstr(argv[i], "$retval"); if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') { if (is_tracepoint) {
/* $retval cannot be used on tracepoint probes. */
trace_probe_log_set_index(i);
trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
kfree(*symbol);
*symbol = NULL; return -EINVAL;
}
*is_return = true; break;
}
} return 0;
}
/*
 * NOTE(review): trace_fprobe_create_internal() is truncated in this
 * chunk — the code after the argument-parsing loop (registration etc.)
 * is not visible.  Code kept byte-identical; only comments added.
 */
staticint trace_fprobe_create_internal(int argc, constchar *argv[], struct traceprobe_parse_context *ctx)
{ /* * Argument syntax: * - Add fentry probe: * f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS] * - Add fexit probe: * f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS] * - Add tracepoint probe: * t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS] * * Fetch args: * $retval : fetch return value * $stack : fetch stack address * $stackN : fetch Nth entry of stack (N:0-) * $argN : fetch Nth argument (N:1-) * $comm : fetch current task comm * @ADDR : fetch memory at ADDR (ADDR should be in kernel) * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) * Dereferencing memory fetch: * +|-offs(ARG) : fetch memory at ARG +|- offs address. * Alias name of args: * NAME=FETCHARG : set NAME as alias of FETCHARG. * Type of args: * FETCHARG:TYPE : use TYPE instead of unsigned long.
*/ struct trace_fprobe *tf __free(free_trace_fprobe) = NULL; constchar *event = NULL, *group = FPROBE_EVENT_SYSTEM; struct module *mod __free(module_put) = NULL; constchar **new_argv __free(kfree) = NULL; char *symbol __free(kfree) = NULL; char *ebuf __free(kfree) = NULL; char *gbuf __free(kfree) = NULL; char *sbuf __free(kfree) = NULL; char *abuf __free(kfree) = NULL; char *dbuf __free(kfree) = NULL; int i, new_argc = 0, ret = 0; bool is_tracepoint = false; bool is_return = false;
/* A 't' prefix selects a tracepoint probe and its default group. */
if (argv[0][0] == 't') {
is_tracepoint = true;
group = TRACEPOINT_EVENT_SYSTEM;
}
if (argv[0][1] != '\0') { if (argv[0][1] != ':') {
trace_probe_log_set_index(0);
trace_probe_log_err(1, BAD_MAXACT); return -EINVAL;
}
event = &argv[0][2];
}
trace_probe_log_set_index(1);
/* a symbol(or tracepoint) must be specified */
ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint); if (ret < 0) return -EINVAL;
/* Parse the user-supplied GROUP/EVENT name, if one was given. */
trace_probe_log_set_index(0); if (event) {
gbuf = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL); if (!gbuf) return -ENOMEM;
ret = traceprobe_parse_event_name(&event, &group, gbuf,
event - argv[0]); if (ret) return -EINVAL;
}
/* No explicit event name: derive one from the symbol. */
if (!event) {
ebuf = kmalloc(MAX_EVENT_NAME_LEN, GFP_KERNEL); if (!ebuf) return -ENOMEM; /* Make a new event name */ if (is_tracepoint)
snprintf(ebuf, MAX_EVENT_NAME_LEN, "%s%s",
isdigit(*symbol) ? "_" : "", symbol); else
snprintf(ebuf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
is_return ? "exit" : "entry");
sanitize_event_name(ebuf);
event = ebuf;
}
if (is_return)
ctx->flags |= TPARG_FL_RETURN; else
ctx->flags |= TPARG_FL_FENTRY;
ctx->funcname = NULL; if (is_tracepoint) { /* Get tracepoint and lock its module until the end of the registration. */ struct tracepoint *tpoint;
ctx->flags |= TPARG_FL_TPOINT;
mod = NULL;
tpoint = find_tracepoint(symbol, &mod); if (tpoint) {
/* Resolve the tracepoint stub's symbol name for argument parsing. */
sbuf = kmalloc(KSYM_NAME_LEN, GFP_KERNEL); if (!sbuf) return -ENOMEM;
ctx->funcname = kallsyms_lookup((unsignedlong)tpoint->probestub,
NULL, NULL, NULL, sbuf);
}
} if (!ctx->funcname)
ctx->funcname = symbol;
ret = traceprobe_expand_dentry_args(argc, argv, &dbuf); if (ret) return ret;
/* setup a probe */
tf = alloc_trace_fprobe(group, event, symbol, argc, is_return, is_tracepoint); if (IS_ERR(tf)) {
ret = PTR_ERR(tf); /* This must return -ENOMEM, else there is a bug */
WARN_ON_ONCE(ret != -ENOMEM); return ret;
}
/* parse arguments */ for (i = 0; i < argc; i++) {
trace_probe_log_set_index(i + 2);
ctx->offset = 0;
ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], ctx); if (ret) return ret; /* This can be -ENOMEM */
}
/*
 * NOTE(review): fragment — tail of an event-format print routine
 * (presumably a seq_file show callback, given seq_printf/seq_putc);
 * its signature is not visible in this chunk.  Code kept
 * byte-identical.
 */
for (i = 0; i < tf->tp.nr_args; i++)
seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
seq_putc(m, '\n');
return 0;
}
/*
 * Enable trace_probe.
 * If @file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler for that file.  Registers every fprobe attached to
 * the event the first time it becomes enabled.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool was_enabled;
	int err;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	was_enabled = trace_probe_is_enabled(tp);

	/* This also changes the "enabled" state. */
	if (file) {
		err = trace_probe_add_file(tp, file);
		if (err)
			return err;
	} else {
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	/* Already enabled: the fprobes are registered, nothing more to do. */
	if (was_enabled)
		return 0;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		err = __register_trace_fprobe(tf);
		if (err < 0)
			return err;
	}

	return 0;
}
/*
 * Disable trace_probe.
 * If @file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler for that file.  When the event becomes fully
 * disabled, every attached fprobe is unregistered and its
 * tracepoint_user reference dropped.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (!file) {
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	} else {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* Other files still use this event: just drop this file. */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	}

	/* Nobody uses the event any more: tear down every fprobe on it. */
	if (!trace_probe_is_enabled(tp)) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			unregister_fprobe(&tf->fp);
			if (tf->tuser) {
				tracepoint_user_put(tf->tuser);
				tf->tuser = NULL;
			}
		}
	}

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
/*
 * Event registration dispatcher.
 * Called by perf_trace_init() or __ftrace_set_clr_event() under
 * event_mutex; routes each trace_reg operation to the matching
 * enable/disable helper.
 */
static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *tfile = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_fprobe(event, tfile);
	case TRACE_REG_UNREGISTER:
		return disable_trace_fprobe(event, tfile);
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		/* Nothing to do for these perf lifecycle notifications. */
		return 0;
#endif
	}

	return 0;
}
/*
 * NOTE(review): this function is truncated here — the trailing
 * "return 0;" and closing brace are not visible in this chunk, and the
 * "#endif" is fused onto a code line.  Code kept byte-identical; only
 * comments added.
 */
/* * Register dynevent at core_initcall. This allows kernel to setup fprobe * events in postcore_initcall without tracefs.
*/ static __init int init_fprobe_trace_early(void)
{ int ret;
ret = dyn_event_register(&trace_fprobe_ops); if (ret) return ret;
#ifdef CONFIG_MODULES
/* Module notifiers: re-arm/disarm tprobes as modules come and go. */
ret = register_tracepoint_module_notifier(&tracepoint_module_nb); if (ret) return ret;
ret = register_module_notifier(&tprobe_event_module_nb); if (ret) return ret; #endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.