/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * Stub used to invoke the list ops without requiring a separate trampoline.
 */
const struct ftrace_ops ftrace_list_ops = {
	.func	= ftrace_ops_list_func,
	.flags	= FTRACE_OPS_FL_STUB,
};

/*
 * Stub used when a call site is disabled. May be called transiently by threads
 * which have made it into ftrace_caller but haven't yet recovered the ops at
 * the point the call site is disabled.
 */
const struct ftrace_ops ftrace_nop_ops = {
	.func	= ftrace_ops_nop_func,
	.flags	= FTRACE_OPS_FL_STUB,
};
#endif
/* Call this function for when a callback filters on set_ftrace_pid */ staticvoid ftrace_pid_func(unsignedlong ip, unsignedlong parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs)
{ struct trace_array *tr = op->private; int pid;
if (tr) {
pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); if (pid == FTRACE_PID_IGNORE) return; if (pid != FTRACE_PID_TRACE &&
pid != current->pid) return;
}
op->saved_func(ip, parent_ip, op, fregs);
}
/*
 * IPI callback used to force memory ordering on remote CPUs.
 * Probably not needed, but issue the read barrier anyway.
 */
void ftrace_sync_ipi(void *data)
{
	smp_rmb();
}
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{ /* * If this is a dynamic or RCU ops, or we force list func, * then it needs to call the list anyway.
*/ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
FTRACE_FORCE_LIST_FUNC) return ftrace_ops_list_func;
/* * Prepare the ftrace_ops that the arch callback will use. * If there's only one ftrace_ops registered, the ftrace_ops_list * will point to the ops we want.
*/
set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
lockdep_is_held(&ftrace_lock));
/* If there's no ftrace_ops registered, just call the stub function */ if (set_function_trace_op == &ftrace_list_end) {
func = ftrace_stub;
/* * If we are at the end of the list and this ops is * recursion safe and not dynamic and the arch supports passing ops, * then have the mcount trampoline call the function directly.
*/
} elseif (rcu_dereference_protected(ftrace_ops_list->next,
lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
func = ftrace_ops_get_list_func(ftrace_ops_list);
} else { /* Just use the default ftrace_ops */
set_function_trace_op = &ftrace_list_end;
func = ftrace_ops_list_func;
}
/* If there's no change, then do nothing more here */ if (ftrace_trace_function == func) return;
/* * If we are using the list function, it doesn't care * about the function_trace_ops.
*/ if (func == ftrace_ops_list_func) {
ftrace_trace_function = func; /* * Don't even bother setting function_trace_ops, * it would be racy to do so anyway.
*/ return;
}
#ifndef CONFIG_DYNAMIC_FTRACE /* * For static tracing, we need to be a bit more careful. * The function change takes affect immediately. Thus, * we need to coordinate the setting of the function_trace_ops * with the setting of the ftrace_trace_function. * * Set the function to the list ops, which will call the * function we want, albeit indirectly, but it handles the * ftrace_ops and doesn't depend on function_trace_op.
*/
ftrace_trace_function = ftrace_ops_list_func; /* * Make sure all CPUs see this. Yes this is slow, but static * tracing is slow and nasty to have enabled.
*/
synchronize_rcu_tasks_rude(); /* Now all cpus are using the list ops. */
function_trace_op = set_function_trace_op; /* Make sure the function_trace_op is visible on all CPUs */
smp_wmb(); /* Nasty way to force a rmb on all cpus */
smp_call_function(ftrace_sync_ipi, NULL, 1); /* OK, we are all set to update the ftrace_trace_function now! */ #endif/* !CONFIG_DYNAMIC_FTRACE */
/* * We are entering ops into the list but another * CPU might be walking that list. We need to make sure * the ops->next pointer is valid before another CPU sees * the ops pointer included into the list.
*/
rcu_assign_pointer(*list, ops);
}
/* * If we are removing the last function, then simply point * to the ftrace_stub.
*/ if (rcu_dereference_protected(*list,
lockdep_is_held(&ftrace_lock)) == ops &&
rcu_dereference_protected(ops->next,
lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
rcu_assign_pointer(*list, &ftrace_list_end); return 0;
}
for (p = list; *p != &ftrace_list_end; p = &(*p)->next) if (*p == ops) break;
int __register_ftrace_function(struct ftrace_ops *ops)
{ if (ops->flags & FTRACE_OPS_FL_DELETED) return -EINVAL;
if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) return -EBUSY;
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS /* * If the ftrace_ops specifies SAVE_REGS, then it only can be used * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
*/ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
!(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) return -EINVAL;
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
ops->flags |= FTRACE_OPS_FL_SAVE_REGS; #endif if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) return -EBUSY;
if (!is_kernel_core_data((unsignedlong)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
add_ftrace_ops(&ftrace_ops_list, ops);
/* Always save the function, and reset at unregistering */
ops->saved_func = ops->func;
if (ftrace_pids_enabled(ops))
ops->func = ftrace_pid_func;
ftrace_update_trampoline(ops);
if (ftrace_enabled)
update_ftrace_function();
return 0;
}
int __unregister_ftrace_function(struct ftrace_ops *ops)
{ int ret;
if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) return -EBUSY;
staticint ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{ struct ftrace_profile_page *pg; int functions; int pages; int i;
/* If we already allocated, do nothing */ if (stat->pages) return 0;
stat->pages = (void *)get_zeroed_page(GFP_KERNEL); if (!stat->pages) return -ENOMEM;
#ifdef CONFIG_DYNAMIC_FTRACE
functions = ftrace_update_tot_cnt; #else /* * We do not know the number of functions that exist because * dynamic tracing is what counts them. With past experience * we have around 20K functions. That should be more than enough. * It is highly unlikely we will execute every function in * the kernel.
*/
functions = 20000; #endif
staticint ftrace_profile_init_cpu(int cpu)
{ struct ftrace_profile_stat *stat; int size;
stat = &per_cpu(ftrace_profile_stats, cpu);
if (stat->hash) { /* If the profile is already created, simply reset it */
ftrace_profile_reset(stat); return 0;
}
/* * We are profiling all functions, but usually only a few thousand * functions are hit. We'll make a hash of 1024 items.
*/
size = FTRACE_PROFILE_HASH_SIZE;
/* * The memory is already allocated, this simply finds a new record to use.
*/ staticstruct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsignedlong ip)
{ struct ftrace_profile *rec = NULL;
/* If the calltime was zero'd ignore it */ if (!profile_data || !profile_data->calltime) return;
calltime = rettime - profile_data->calltime;
if (!fgraph_sleep_time) { if (current->ftrace_sleeptime)
calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
}
if (!fgraph_graph_time) { struct profile_fgraph_data *parent_data;
/* Append this call time to the parent time to subtract */
parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1); if (parent_data)
parent_data->subtime += calltime;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret;
val = !!val;
guard(mutex)(&ftrace_profile_lock); if (ftrace_profile_enabled ^ val) { if (val) {
ret = ftrace_profile_init(); if (ret < 0) return ret;
ret = register_ftrace_profiler(); if (ret < 0) return ret;
ftrace_profile_enabled = 1;
} else {
ftrace_profile_enabled = 0; /* * unregister_ftrace_profiler calls stop_machine * so this acts like an synchronize_rcu.
*/
unregister_ftrace_profiler();
}
}
*ppos += cnt;
return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{ char buf[64]; /* big enough to hold a number */ int r;
/* used to initialize the real stat files */ staticstruct tracer_stat function_stats __initdata = {
.name = "functions",
.stat_start = function_stat_start,
.stat_next = function_stat_next,
.stat_cmp = function_stat_cmp,
.stat_headers = function_stat_headers,
.stat_show = function_stat_show
};
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{ struct ftrace_profile_stat *stat; char *name; int ret; int cpu;
for_each_possible_cpu(cpu) {
stat = &per_cpu(ftrace_profile_stats, cpu);
name = kasprintf(GFP_KERNEL, "function%d", cpu); if (!name) { /* * The files created are permanent, if something happens * we still do not free memory.
*/
WARN(1, "Could not allocate stat file for cpu %d\n",
cpu); return;
}
stat->stat = function_stats;
stat->stat.name = name;
ret = register_stat_tracer(&stat->stat); if (ret) {
WARN(1, "Could not register function stat for cpu %d\n",
cpu);
kfree(name); return;
}
}
/* * Set when doing a global update, like enabling all recs or disabling them. * It is not set when just updating a single ftrace_ops.
*/ staticbool update_all_ops;
/* * We make these constant because no one should touch them, * but they are used as the default "empty hash", to avoid allocating * it all the time. These are in a read only section such that if * anyone does try to modify it, it will cause an exception.
*/ staticconststruct hlist_head empty_buckets[1]; staticconststruct ftrace_hash empty_hash = {
.buckets = (struct hlist_head *)empty_buckets,
}; #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
/* * Used by the stack unwinder to know about dynamic ftrace trampolines.
*/ struct ftrace_ops *ftrace_ops_trampoline(unsignedlong addr)
{ struct ftrace_ops *op = NULL;
/* * Some of the ops may be dynamically allocated, * they are freed after a synchronize_rcu().
*/
preempt_disable_notrace();
do_for_each_ftrace_op(op, ftrace_ops_list) { /* * This is to check for dynamically allocated trampolines. * Trampolines that are in kernel text will have * core_kernel_text() return true.
*/ if (op->trampoline && op->trampoline_size) if (addr >= op->trampoline &&
addr < op->trampoline + op->trampoline_size) {
preempt_enable_notrace(); return op;
}
} while_for_each_ftrace_op(op);
preempt_enable_notrace();
return NULL;
}
/* * This is used by __kernel_text_address() to return true if the * address is on a dynamically allocated trampoline that would * not return true for either core_kernel_text() or * is_module_text_address().
*/ bool is_ftrace_trampoline(unsignedlong addr)
{ return ftrace_ops_trampoline(addr) != NULL;
}
/* A page-sized chunk of dyn_ftrace records, chained into a list */
struct ftrace_page {
	struct ftrace_page	*next;		/* next chunk in the list */
	struct dyn_ftrace	*records;	/* array of records in this chunk */
	int			index;		/* number of records in use */
	int			order;		/* allocation order of the chunk */
};
/* Only use this function if ftrace_hash_empty() has already been tested */ static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsignedlong ip)
{ unsignedlong key; struct ftrace_func_entry *entry; struct hlist_head *hhd;
/** * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash * @hash: The hash to look at * @ip: The instruction pointer to test * * Search a given @hash to see if a given instruction pointer (@ip) * exists in it. * * Returns: the entry that holds the @ip if found. NULL otherwise.
*/ struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsignedlong ip)
{ if (ftrace_hash_empty(hash)) return NULL;
/* * Allocate a new hash and remove entries from @src and move them to the new hash. * On success, the @src hash will be empty and should be freed.
*/ staticstruct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
{ struct ftrace_func_entry *entry; struct ftrace_hash *new_hash; struct hlist_head *hhd; struct hlist_node *tn; int bits = 0; int i;
/* * Use around half the size (max bit of it), but * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
*/
bits = fls(size / 2);
/* Don't allocate too much */ if (bits > FTRACE_HASH_MAX_BITS)
bits = FTRACE_HASH_MAX_BITS;
new_hash = alloc_ftrace_hash(bits); if (!new_hash) return NULL;
/* Move the @src entries to a newly allocated hash */ staticstruct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{ int size = src->count;
/* * If the new source is empty, just return the empty_hash.
*/ if (ftrace_hash_empty(src)) return EMPTY_HASH;
return __move_hash(src, size);
}
/**
 * ftrace_hash_move - move a new hash to a filter and do updates
 * @ops: The ops with the hash that @dst points to
 * @enable: True if for the filter hash, false for the notrace hash
 * @dst: Points to the @ops hash that should be updated
 * @src: The hash to update @dst with
 *
 * This is called when an ftrace_ops hash is being updated and the
 * kernel needs to reflect this. Note, this only updates the kernel
 * function callbacks if the @ops is enabled (not to be confused with
 * @enable above). If the @ops is enabled, its hash determines what
 * callbacks get called. This function gets called when the @ops hash
 * is updated and it requires new callbacks.
 *
 * On success the elements of @src is moved to @dst, and @dst is updated
 * properly, as well as the functions determined by the @ops hashes
 * are now calling the @ops callback function.
 *
 * Regardless of return type, @src should be freed with free_ftrace_hash().
 */
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops);

	return 0;
}
staticbool hash_contains_ip(unsignedlong ip, struct ftrace_ops_hash *hash)
{ /* * The function record is a match if it exists in the filter * hash and not in the notrace hash. Note, an empty hash is * considered a match for the filter hash, but an empty * notrace hash is considered not in the notrace hash.
*/ return (ftrace_hash_empty(hash->filter_hash) ||
__ftrace_lookup_ip(hash->filter_hash, ip)) &&
(ftrace_hash_empty(hash->notrace_hash) ||
!__ftrace_lookup_ip(hash->notrace_hash, ip));
}
/* * Test the hashes for this ops to see if we want to call * the ops->func or not. * * It's a match if the ip is in the ops->filter_hash or * the filter_hash does not exist or is empty, * AND * the ip is not in the ops->notrace_hash. * * This needs to be called with preemption disabled as * the hashes are freed with call_rcu().
 */ int
ftrace_ops_test(struct ftrace_ops *ops, unsignedlong ip, void *regs)
{ struct ftrace_ops_hash hash; int ret;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /* * There's a small race when adding ops that the ftrace handler * that wants regs, may be called without them. We can not * allow that handler to be called if regs is NULL.
 */ if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) return 0; #endif
	/*
	 * NOTE(review): 'hash' is read below but never initialized in this
	 * copy of the function. Upstream assigns hash.filter_hash and
	 * hash.notrace_hash from ops->func_hash under an RCU read lock
	 * right here; those lines appear to have been lost in extraction.
	 * TODO: confirm against the original source before relying on this.
	 */
	if (hash_contains_ip(ip, &hash))
		ret = 1; else
		ret = 0;

	return ret;
}
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];
/** * ftrace_location_range - return the first address of a traced location * if it touches the given ip range * @start: start of range to search. * @end: end of range to search (inclusive). @end points to the last byte * to check. * * Returns: rec->ip if the related ftrace location is a least partly within * the given address range. That is, the first address of the instruction * that is either a NOP or call to the function tracer. It checks the ftrace * internal tables to determine if the address belongs or not.
*/ unsignedlong ftrace_location_range(unsignedlong start, unsignedlong end)
{ struct dyn_ftrace *rec; unsignedlong ip = 0;
rcu_read_lock();
rec = lookup_rec(start, end); if (rec)
ip = rec->ip;
rcu_read_unlock();
return ip;
}
/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * Returns:
 * * If @ip matches the ftrace location, return @ip.
 * * If @ip matches sym+0, return sym's ftrace location.
 * * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	unsigned long loc;
	unsigned long offset;
	unsigned long size;

	loc = ftrace_location_range(ip, ip);
	if (!loc) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			return 0;

		/* map sym+0 to __fentry__ */
		if (!offset)
			loc = ftrace_location_range(ip, ip + size - 1);
	}
	return loc;
}
/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns: 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}
/* Test if ops registered to this rec needs regs */ staticbool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{ struct ftrace_ops *ops; bool keep_regs = false;
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) { /* pass rec in as regs to have non-NULL val */ if (ftrace_ops_test(ops, rec->ip, rec)) { if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
keep_regs = true; break;
}
}
}
staticbool skip_record(struct dyn_ftrace *rec)
{ /* * At boot up, weak functions are set to disable. Function tracing * can be enabled before they are, and they still need to be disabled now. * If the record is disabled, still continue if it is marked as already * enabled (this is needed to keep the accounting working).
*/ return rec->flags & FTRACE_FL_DISABLED &&
!(rec->flags & FTRACE_FL_ENABLED);
}
/*
 * This is the main engine to the ftrace updates to the dyn_ftrace records.
 *
 * It will iterate through all the available ftrace functions
 * (the ones that ftrace can have callbacks to) and set the flags
 * in the associated dyn_ftrace records.
 *
 * @inc: If true, the functions associated to @ops are added to
 *       the dyn_ftrace records, otherwise they are removed.
 *
 * Returns true if any record now requires an update of its call site
 * (i.e. its state differs from what is currently patched in).
 */
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *notrace_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * If the count is zero, we update all records.
	 * Otherwise we just update the items in the hash.
	 */
	hash = ops->func_hash->filter_hash;
	notrace_hash = ops->func_hash->notrace_hash;
	if (ftrace_hash_empty(hash))
		all = true;

	do_for_each_ftrace_rec(pg, rec) {
		int in_notrace_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (skip_record(rec))
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip);

			/*
			 * We want to match all functions that are in the hash but
			 * not in the other hash.
			 */
			if (in_hash && !in_notrace_hash)
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}

		/*
		 * If the rec has a single associated ops, and ops->func can be
		 * called directly, allow the call site to call via the ops.
		 */
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
		    ftrace_rec_count(rec) == 1 &&
		    ftrace_ops_get_func(ops) == ops->func)
			rec->flags |= FTRACE_FL_CALL_OPS;
		else
			rec->flags &= ~FTRACE_FL_CALL_OPS;

		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}
/* * This is called when an ops is removed from tracing. It will decrement * the counters of the dyn_ftrace records for all the functions that * the @ops attached to.
*/ staticbool ftrace_hash_rec_disable(struct ftrace_ops *ops)
{ return __ftrace_hash_rec_update(ops, false);
}
/* * This is called when an ops is added to tracing. It will increment * the counters of the dyn_ftrace records for all the functions that * the @ops attached to.
*/ staticbool ftrace_hash_rec_enable(struct ftrace_ops *ops)
{ return __ftrace_hash_rec_update(ops, true);
}
/*
 * This function will update what functions @ops traces when its filter
 * changes.
 *
 * The @inc states if the @ops callbacks are going to be added or removed.
 * When one of the @ops hashes are updated to a "new_hash" the dyn_ftrace
 * records are update via:
 *
 *   ftrace_hash_rec_disable_modify(ops);
 *   ops->hash = new_hash
 *   ftrace_hash_rec_enable_modify(ops);
 *
 * Where the @ops is removed from all the records it is tracing using
 * its old hash. The @ops hash is updated to the new hash, and then
 * the @ops is added back to the records so that it is tracing all
 * the new functions.
 */
static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, inc);
	} while_for_each_ftrace_op(op);
}
/* * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK * or no-needed to update, -EBUSY if it detects a conflict of the flag * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs. * Note that old_hash and new_hash has below meanings * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) * - If the hash is EMPTY_HASH, it hits nothing * - Anything else hits the recs which match the hash entries. * * DIRECT ops does not have IPMODIFY flag, but we still need to check it * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate * the return value to the caller and eventually to the owner of the DIRECT * ops.
*/ staticint __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, struct ftrace_hash *old_hash, struct ftrace_hash *new_hash)
{ struct ftrace_page *pg; struct dyn_ftrace *rec, *end = NULL; int in_old, in_new; bool is_ipmodify, is_direct;
/* Only update if the ops has been registered */ if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) return 0;
/* neither IPMODIFY nor DIRECT, skip */ if (!is_ipmodify && !is_direct) return 0;
if (WARN_ON_ONCE(is_ipmodify && is_direct)) return 0;
/* * Since the IPMODIFY and DIRECT are very address sensitive * actions, we do not allow ftrace_ops to set all functions to new * hash.
*/ if (!new_hash || !old_hash) return -EINVAL;
/* We need to update only differences of filter_hash */
in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
in_new = !!ftrace_lookup_ip(new_hash, rec->ip); if (in_old == in_new) continue;
if (in_new) { if (rec->flags & FTRACE_FL_IPMODIFY) { int ret;
/* Cannot have two ipmodify on same rec */ if (is_ipmodify) goto rollback;
FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
/* * Another ops with IPMODIFY is already * attached. We are now attaching a direct * ops. Run SHARE_IPMODIFY_SELF, to check * whether sharing is supported.
*/ if (!ops->ops_func) return -EBUSY;
ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); if (ret) return ret;
} elseif (is_ipmodify) {
rec->flags |= FTRACE_FL_IPMODIFY;
}
} elseif (is_ipmodify) {
rec->flags &= ~FTRACE_FL_IPMODIFY;
}
} while_for_each_ftrace_rec();
return 0;
rollback:
end = rec;
/* Roll back what we did above */
do_for_each_ftrace_rec(pg, rec) {
/* Print a human-readable description of the last recorded ftrace_bug_type */
static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}
/** * ftrace_bug - report and shutdown function tracer * @failed: The failed type (EFAULT, EINVAL, EPERM) * @rec: The record that failed * * The arch code that enables or disables the function tracing * can call ftrace_bug() when it has detected a problem in * modifying the code. @failed should be one of either: * EFAULT - if the problem happens on reading the @ip address * EINVAL - if what is read at @ip is not what was expected * EPERM - if the problem happens on writing to the @ip address
*/ void ftrace_bug(int failed, struct dyn_ftrace *rec)
{ unsignedlong ip = rec ? rec->ip : 0;
if (skip_record(rec)) return FTRACE_UPDATE_IGNORE;
/* * If we are updating calls: * * If the record has a ref count, then we need to enable it * because someone is using it. * * Otherwise we make sure its disabled. * * If we are disabling calls, then disable all records that * are enabled.
*/ if (enable && ftrace_rec_count(rec))
flag = FTRACE_FL_ENABLED;
/* * If enabling and the REGS flag does not match the REGS_EN, or * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore * this record. Set flags to fail the compare against ENABLED. * Same for direct calls.
*/ if (flag) { if (!(rec->flags & FTRACE_FL_REGS) !=
!(rec->flags & FTRACE_FL_REGS_EN))
flag |= FTRACE_FL_REGS;
if (!(rec->flags & FTRACE_FL_TRAMP) !=
!(rec->flags & FTRACE_FL_TRAMP_EN))
flag |= FTRACE_FL_TRAMP;
/* * Direct calls are special, as count matters. * We must test the record for direct, if the * DIRECT and DIRECT_EN do not match, but only * if the count is 1. That's because, if the * count is something other than one, we do not * want the direct enabled (it will be done via the * direct helper). But if DIRECT_EN is set, and * the count is not one, we need to clear it. *
*/ if (ftrace_rec_count(rec) == 1) { if (!(rec->flags & FTRACE_FL_DIRECT) !=
!(rec->flags & FTRACE_FL_DIRECT_EN))
flag |= FTRACE_FL_DIRECT;
} elseif (rec->flags & FTRACE_FL_DIRECT_EN) {
flag |= FTRACE_FL_DIRECT;
}
/* * Ops calls are special, as count matters. * As with direct calls, they must only be enabled when count * is one, otherwise they'll be handled via the list ops.
*/ if (ftrace_rec_count(rec) == 1) { if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
!(rec->flags & FTRACE_FL_CALL_OPS_EN))
flag |= FTRACE_FL_CALL_OPS;
} elseif (rec->flags & FTRACE_FL_CALL_OPS_EN) {
flag |= FTRACE_FL_CALL_OPS;
}
}
/* If the state of this record hasn't changed, then do nothing */ if ((rec->flags & FTRACE_FL_ENABLED) == flag) return FTRACE_UPDATE_IGNORE;
if (flag) { /* Save off if rec is being enabled (for return value) */
flag ^= rec->flags & FTRACE_FL_ENABLED;
if (update) {
rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED; if (flag & FTRACE_FL_REGS) { if (rec->flags & FTRACE_FL_REGS)
rec->flags |= FTRACE_FL_REGS_EN; else
rec->flags &= ~FTRACE_FL_REGS_EN;
} if (flag & FTRACE_FL_TRAMP) { if (rec->flags & FTRACE_FL_TRAMP)
rec->flags |= FTRACE_FL_TRAMP_EN; else
rec->flags &= ~FTRACE_FL_TRAMP_EN;
}
/* Keep track of anything that modifies the function */ if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
rec->flags |= FTRACE_FL_MODIFIED;
if (flag & FTRACE_FL_DIRECT) { /* * If there's only one user (direct_ops helper) * then we can call the direct function * directly (no ftrace trampoline).
*/ if (ftrace_rec_count(rec) == 1) { if (rec->flags & FTRACE_FL_DIRECT)
rec->flags |= FTRACE_FL_DIRECT_EN; else
rec->flags &= ~FTRACE_FL_DIRECT_EN;
} else { /* * Can only call directly if there's * only one callback to the function.
*/
rec->flags &= ~FTRACE_FL_DIRECT_EN;
}
}
if (flag & FTRACE_FL_CALL_OPS) { if (ftrace_rec_count(rec) == 1) { if (rec->flags & FTRACE_FL_CALL_OPS)
rec->flags |= FTRACE_FL_CALL_OPS_EN; else
rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
} else { /* * Can only call directly if there's * only one set of associated ops.
*/
rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
}
}
}
/* * If this record is being updated from a nop, then * return UPDATE_MAKE_CALL. * Otherwise, * return UPDATE_MODIFY_CALL to tell the caller to convert * from the save regs, to a non-save regs function or * vice versa, or from a trampoline call.
*/ if (flag & FTRACE_FL_ENABLED) {
ftrace_bug_type = FTRACE_BUG_CALL; return FTRACE_UPDATE_MAKE_CALL;
}
if (update) { /* If there's no more users, clear all flags */ if (!ftrace_rec_count(rec))
rec->flags &= FTRACE_NOCLEAR_FLAGS; else /* * Just disable the record, but keep the ops TRAMP * and REGS states. The _EN flags must be disabled though.
*/
rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
FTRACE_FL_CALL_OPS_EN);
}
/**
 * ftrace_update_record - set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to true if the record is tracing, false to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
{
	/* Third argument true: commit the state change, not just test it */
	int ret = ftrace_check_record(rec, enable, true);

	return ret;
}
/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to true to check if enabled, false if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
{
	/* Third argument false: query only, do not modify the record */
	int ret = ftrace_check_record(rec, enable, false);

	return ret;
}
/* * Need to check removed ops first. * If they are being removed, and this rec has a tramp, * and this rec is in the ops list, then it would be the * one with the tramp.
*/ if (removed_ops) { if (hash_contains_ip(ip, &removed_ops->old_hash)) return removed_ops;
}
/* * Need to find the current trampoline for a rec. * Now, a trampoline is only attached to a rec if there * was a single 'ops' attached to it. But this can be called * when we are adding another op to the rec or removing the * current one. Thus, if the op is being added, we can * ignore it because it hasn't attached itself to the rec * yet. * * If an ops is being modified (hooking to different functions) * then we don't care about the new functions that are being * added, just the old ones (that are probably being removed). * * If we are adding an ops to a function that already is using * a trampoline, it needs to be removed (trampolines are only * for single ops connected), then an ops that is not being * modified also needs to be checked.
*/
do_for_each_ftrace_op(op, ftrace_ops_list) {
if (!op->trampoline) continue;
/* * If the ops is being added, it hasn't gotten to * the point to be removed from this tree yet.
*/ if (op->flags & FTRACE_OPS_FL_ADDING) continue;
/* * If the ops is being modified and is in the old * hash, then it is probably being removed from this * function.
*/ if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
hash_contains_ip(ip, &op->old_hash)) return op; /* * If the ops is not being added or modified, and it's * in its normal filter hash, then this must be the one * we want!
*/ if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
hash_contains_ip(ip, op->func_hash)) return op;
do_for_each_ftrace_op(op, ftrace_ops_list) { /* pass rec in as regs to have non-NULL val */ if (hash_contains_ip(ip, op->func_hash)) return op;
} while_for_each_ftrace_op(op);
if (hash_contains_ip(ip, op->func_hash)) { if (found) return NULL;
found = op;
}
} while_for_each_ftrace_op(op);
return found;
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

/* Protected by rcu_tasks for reading, and direct_mutex for writing */
static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
static DEFINE_MUTEX(direct_mutex);
/* * Search the direct_functions hash to see if the given instruction pointer * has a direct caller attached to it.
*/ unsignedlong ftrace_find_rec_direct(unsignedlong ip)
{ struct ftrace_func_entry *entry;
entry = __ftrace_lookup_ip(direct_functions, ip); if (!entry) return 0;
/** * ftrace_get_addr_new - Get the call address to set to * @rec: The ftrace record descriptor * * If the record has the FTRACE_FL_REGS set, that means that it * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.