// SPDX-License-Identifier: GPL-2.0 /* * * Function graph tracer. * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> * Mostly borrowed from function tracer which * is Copyright (c) Steven Rostedt <srostedt@redhat.com> *
*/ #include <linux/uaccess.h> #include <linux/ftrace.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/fs.h>
#include"trace.h" #include"trace_output.h"
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;
/*
 * Per-CPU state kept by the graph-trace output iterator.
 */
struct fgraph_cpu_data {
	pid_t		last_pid;	/* pid of the last task printed on this CPU */
	int		depth;		/* current call-graph nesting depth */
	int		depth_irq;	/* depth at which irq code was entered; -1 when not in irq */
	int		ignore;		/* when set, skip the next event on this CPU */
	/* entry function addresses, saved so a later exit can be matched */
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};
/*
 * A graph-entry event together with storage for its recorded
 * function arguments.
 */
struct fgraph_ent_args {
	struct ftrace_graph_ent_entry	ent;
	/* Force the sizeof of args[] to have FTRACE_REGS_MAX_ARGS entries */
	unsigned long			args[FTRACE_REGS_MAX_ARGS];
};
/* Place to preserve last processed entry. */ union { struct fgraph_ent_args ent; /* TODO allow retaddr to have args */ struct fgraph_retaddr_ent_entry rent;
}; struct ftrace_graph_ret_entry ret; int failed; int cpu;
};
#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;
staticstruct tracer_opt trace_opts[] = { /* Display overruns? (for self-debug purpose) */
{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, /* Display CPU ? */
{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, /* Display Overhead ? */
{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, /* Display proc name/pid */
{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, /* Display duration of execution */
{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, /* Display absolute time of an entry */
{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, /* Display interrupts */
{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, /* Display function name after trailing } */
{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) }, #ifdef CONFIG_FUNCTION_GRAPH_RETVAL /* Display function return value ? */
{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) }, /* Display function return value in hexadecimal format ? */
{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) }, #endif #ifdef CONFIG_FUNCTION_GRAPH_RETADDR /* Display function return address ? */
{ TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) }, #endif #ifdef CONFIG_FUNCTION_TRACE_ARGS /* Display function arguments ? */
{ TRACER_OPT(funcgraph-args, TRACE_GRAPH_ARGS) }, #endif /* Include sleep time (scheduled out) between entry and return */
{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
#ifdef CONFIG_FUNCTION_PROFILER /* Include time within nested functions */
{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) }, #endif
/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,	/* fill the whole column */
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,	/* fill the leading part only */
	FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,	/* fill trailing part, ending with '|' */
};
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API if (fregs) { for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
entry->args[i] = ftrace_regs_get_argument(fregs, i);
} #endif
/* * Do not trace a function if it's filtered by set_graph_notrace. * Make the index of ret stack negative to indicate that it should * ignore further functions. But it needs its own ret stack entry * to recover the original index in order to continue tracing after * returning from the function.
*/ if (ftrace_graph_notrace_addr(trace->func)) {
*task_var |= TRACE_GRAPH_NOTRACE; /* * Need to return 1 to have the return called * that will clear the NOTRACE bit.
*/ return 1;
}
if (!ftrace_trace_task(tr)) return 0;
if (ftrace_graph_ignore_func(gops, trace)) return 0;
if (ftrace_graph_ignore_irqs()) return 0;
if (fgraph_sleep_time) { /* Only need to record the calltime */
ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
} else {
ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes)); if (ftimes)
ftimes->sleeptime = current->ftrace_sleeptime;
} if (!ftimes) return 0;
ftimes->calltime = trace_clock_local();
/* * Stop here if tracing_threshold is set. We only write function return * events to the ring buffer.
*/ if (tracing_thresh) return 1;
trace_ctx = tracing_gen_ctx(); if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) { unsignedlong retaddr = ftrace_graph_top_ret_addr(current);
ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
} else {
ret = __graph_entry(tr, trace, trace_ctx, fregs);
}
/*
 * Print the CPU column: the CPU number right-aligned to
 * max_bytes_for_cpu digits, followed by ") ".
 */
static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}
#define TRACE_GRAPH_PROCINFO_LENGTH 14
staticvoid print_graph_proc(struct trace_seq *s, pid_t pid)
{ char comm[TASK_COMM_LEN]; /* sign + log10(MAX_INT) + '\0' */ char pid_str[12]; int spaces = 0; int len; int i;
/* If the pid changed since the last trace, output this event */ staticvoid
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
pid_t prev_pid;
pid_t *last_pid;
/* * If the previous output failed to write to the seq buffer, * then we just reuse the data from before.
*/ if (data && data->failed) {
curr = &data->ent.ent;
next = &data->ret;
} else {
ring_iter = trace_buffer_iter(iter, iter->cpu);
/* First peek to compare current entry and the next one */ if (ring_iter)
event = ring_buffer_iter_peek(ring_iter, NULL); else { /* * We need to consume the current entry to see * the next one.
*/
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
}
if (!event) return NULL;
next = ring_buffer_event_data(event);
if (data) { /* * Save current and next entries for later reference * if the output fails.
*/ if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT)) {
data->rent = *(struct fgraph_retaddr_ent_entry *)curr;
} else { int size = min((int)sizeof(data->ent), (int)iter->ent_size);
memcpy(&data->ent, curr, size);
} /* * If the next event is not a return type, then * we only care about what type it is. Otherwise we can * safely copy the entire event.
*/ if (next->ent.type == TRACE_GRAPH_RET)
data->ret = *next; else
data->ret.ent.type = next->ent.type;
}
}
if (next->ent.type != TRACE_GRAPH_RET) return NULL;
if (curr->ent.pid != next->ent.pid ||
curr->graph_ent.func != next->ret.func) return NULL;
/* this is a leaf, now advance the iterator */ if (ring_iter)
ring_buffer_iter_advance(ring_iter);
/* No real adata, just filling the column with spaces */ switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { case FLAGS_FILL_FULL:
trace_seq_puts(s, " | "); return; case FLAGS_FILL_START:
trace_seq_puts(s, " "); return; case FLAGS_FILL_END:
trace_seq_puts(s, " |"); return;
}
/* Signal a overhead of time execution to the output */ if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
trace_seq_printf(s, "%c ", trace_find_mark(duration)); else
trace_seq_puts(s, " ");
/* * Comments display at + 1 to depth. Since * this is a leaf function, keep the comments * equal to this depth.
*/
cpu_data->depth = call->depth - 1;
/* No need to keep this function around for this depth */ if (call->depth < FTRACE_RETFUNC_DEPTH &&
!WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = 0;
}
/* Overhead and duration */
print_graph_duration(tr, duration, s, flags);
/* Function */ for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
trace_seq_putc(s, ' ');
/* Save this function pointer to see if the exit matches */ if (call->depth < FTRACE_RETFUNC_DEPTH &&
!WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = call->func;
}
/* No time */
print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
/* Function */ for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
trace_seq_putc(s, ' ');
/* Latency format */ if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
print_graph_lat_fmt(s, ent);
return;
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	addr += iter->tr->text_delta;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	/*
	 * NOTE(review): this initialization was missing from the span as
	 * extracted, leaving depth_irq dereferenced while uninitialized;
	 * restored following the same per_cpu_ptr() pattern used in
	 * graph_trace_open() — confirm against upstream.
	 */
	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	/* Not in irq yet: only addresses inside the irq-entry text count */
	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	/*
	 * NOTE(review): this initialization was missing from the span as
	 * extracted, leaving depth_irq dereferenced while uninitialized;
	 * restored following the same per_cpu_ptr() pattern used in
	 * graph_trace_open() — confirm against upstream.
	 */
	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
staticenum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, struct trace_iterator *iter, u32 flags)
{ struct fgraph_data *data = iter->private; struct ftrace_graph_ent *call; struct ftrace_graph_ret_entry *leaf_ret; staticenum print_line_t ret; int cpu = iter->cpu; /* * print_graph_entry() may consume the current event, * thus @field may become invalid, so we need to save it. * sizeof(struct ftrace_graph_ent_entry) is very small, * it can be safely saved at the stack.
*/ struct ftrace_graph_ent_entry *entry;
u8 save_buf[sizeof(*entry) + FTRACE_REGS_MAX_ARGS * sizeof(long)];
/* The ent_size is expected to be as big as the entry */ if (iter->ent_size > sizeof(save_buf))
iter->ent_size = sizeof(save_buf);
leaf_ret = get_return_for_leaf(iter, entry); if (leaf_ret)
ret = print_graph_entry_leaf(iter, entry, leaf_ret, s, flags); else
ret = print_graph_entry_nested(iter, entry, s, cpu, flags);
if (data) { /* * If we failed to write our output, then we need to make * note of it. Because we already consumed our entry.
*/ if (s->full) {
data->failed = 1;
data->cpu = cpu;
} else
data->failed = 0;
}
if (check_irq_return(iter, flags, trace->depth)) return TRACE_TYPE_HANDLED;
if (data) { struct fgraph_cpu_data *cpu_data; int cpu = iter->cpu;
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
/* * Comments display at + 1 to depth. This is the * return from a function, we now want the comments * to display at the same level of the bracket.
*/
cpu_data->depth = trace->depth - 1;
/* Overhead and duration */
print_graph_duration(tr, duration, s, flags);
/* Closing brace */ for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
trace_seq_putc(s, ' ');
/* * Always write out the function name and its return value if the * funcgraph-retval option is enabled.
*/ if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
print_graph_retval(s, NULL, trace, (void *)func, flags,
tr->trace_flags, 0);
} else { /* * If the return function does not have a matching entry, * then the entry was lost. Instead of just printing * the '}' and letting the user guess what function this * belongs to, write out the function name. Always do * that if the funcgraph-tail option is enabled.
*/ if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
trace_seq_puts(s, "}"); else
trace_seq_printf(s, "} /* %ps */", (void *)func);
}
trace_seq_putc(s, '\n');
if (data)
depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
print_graph_prologue(iter, s, 0, 0, flags);
/* No time */
print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
/* Indentation */ if (depth > 0) for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
trace_seq_putc(s, ' ');
/* The comment */
trace_seq_puts(s, "/* ");
switch (iter->ent->type) { case TRACE_BPUTS:
ret = trace_print_bputs_msg_only(iter); if (ret != TRACE_TYPE_HANDLED) return ret; break; case TRACE_BPRINT:
ret = trace_print_bprintk_msg_only(iter); if (ret != TRACE_TYPE_HANDLED) return ret; break; case TRACE_PRINT:
ret = trace_print_printk_msg_only(iter); if (ret != TRACE_TYPE_HANDLED) return ret; break; default:
event = ftrace_find_event(ent->type); if (!event) return TRACE_TYPE_UNHANDLED;
ret = event->funcs->trace(iter, sym_flags, event); if (ret != TRACE_TYPE_HANDLED) return ret;
}
/* * If the last output failed, there's a possibility we need * to print out the missing entry which would never go out.
*/ if (data && data->failed) {
field = &data->ent.ent;
iter->cpu = data->cpu;
ret = print_graph_entry(field, s, iter, flags); if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
ret = TRACE_TYPE_NO_CONSUME;
}
iter->cpu = cpu; return ret;
}
if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) return;
if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) { /* print nothing if the buffers are empty */ if (trace_empty(iter)) return;
print_trace_header(s, iter);
}
__print_graph_headers_flags(tr, s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
{ /* pid and depth on the last trace processed */ struct fgraph_data *data;
gfp_t gfpflags; int cpu;
iter->private = NULL;
/* We can be called in atomic context via ftrace_dump() */
gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
data = kzalloc(sizeof(*data), gfpflags); if (!data) goto out_err;
data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags); if (!data->cpu_data) goto out_err_free;
for_each_possible_cpu(cpu) {
pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
/*
 * NOTE(review): the following German text is a website disclaimer that was
 * clearly appended to this C source by the tool that exported it; it is not
 * part of the kernel file and should be removed. Translation: "The
 * information on this web page was compiled carefully to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the provided information is guaranteed. Note: the colored syntax display
 * and the measurement are still experimental." Preserved verbatim inside a
 * comment so the file remains compilable:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */