/* Generic signal handler: ask the main trace loop to stop. */
static void sig_handler(int sig __maybe_unused)
{
	done = true;
}
/*
 * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	/* Record the exec errno delivered via the queued signal payload. */
	workload_exec_errno = info->si_value.sival_int;
	done = true;
}
/*
 * NOTE(review): fragment — the enclosing function's header is outside this
 * chunk.  It appears to probe whether the tracing control file
 * "set_ftrace_pid" exists and report that as a "supported" flag.
 */
file = get_tracing_file("set_ftrace_pid"); if (!file) {
pr_debug("cannot get tracing file set_ftrace_pid\n"); returnfalse;
}
/* F_OK: existence-only check, no read/write permission needed */
if (!access(file, F_OK))
supported = true;
put_tracing_file(file); return supported;
}
/*
 * Wrapper to test if a file in directory .../tracing/instances/XXX
 * exists. If so return the .../tracing/instances/XXX file for use.
 * Otherwise the file exists only in directory .../tracing and
 * is applicable to all instances, for example file available_filter_functions.
 * Return that file name in this case.
 *
 * This functions works similar to get_tracing_file() and expects its caller
 * to free the returned file name.
 *
 * The global variable tracing_instance is set in init_tracing_instance()
 * called at the beginning to a process specific tracing subdirectory.
 */
staticchar *get_tracing_instance_file(constchar *name)
{ char *file;
/* NOTE(review): truncated — the rest of this function (existence check
 * and "return file;") is missing from this chunk. */
if (asprintf(&file, "%s/%s", tracing_instance, name) < 0) return NULL;
/*
 * NOTE(review): fragment of a tracing-file write helper; val, fd, size,
 * ret and errbuf are declared outside this chunk.
 *
 * Copy the original value and append a '\n'. Without this,
 * the kernel can hide possible errors.
 */
val_copy = strdup(val); if (!val_copy) goto out_close;
val_copy[size] = '\n';
/* write size+1 bytes: the value plus the appended newline */
if (write(fd, val_copy, size + 1) == size + 1)
ret = 0; else
pr_debug("write '%s' to tracing/%s failed: %s\n",
val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
/*
 * Remove .../tracing/instances/XXX subdirectory created with
 * init_tracing_instance().
 */
static void exit_tracing_instance(void)
{
	if (rmdir(tracing_instance))
		pr_err("failed to delete tracing/instances directory\n");
}
/*
 * Create subdirectory within .../tracing/instances/XXX to have session
 * or process specific setup. To delete this setup, simply remove the
 * subdirectory.
 */
staticint init_tracing_instance(void)
{ char dirname[] = "instances/perf-ftrace-XXXXXX"; char *path;
/* NOTE(review): body truncated in this chunk — only the error path is
 * visible below; the directory-creation logic is missing. */
error:
pr_err("failed to create tracing/instances directory\n"); return -1;
}
/*
 * Write the pid of every traced thread to tracing/set_ftrace_pid so
 * ftrace records events only for those tasks.  Skipped when the target
 * is restricted by CPU rather than by pid.
 *
 * Returns 0 on success, -1 if appending to the control file fails.
 */
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  perf_thread_map__pid(ftrace->evlist->core.threads, i));
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}
/*
 * NOTE(review): truncated — this chunk shows only the mask-size
 * computation and allocation; the mask formatting/write tail is missing.
 */
staticint set_tracing_cpumask(struct perf_cpu_map *cpumap)
{ char *cpumask;
size_t mask_size; int ret; int last_cpu;
last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
cpumask = malloc(mask_size); if (cpumask == NULL) {
pr_debug("failed to allocate cpu mask\n"); return -1;
}
/*
 * NOTE(review): fragment — tail of a tracer-selection helper whose header
 * is outside this chunk.
 *
 * The function_graph has priority over function tracer.
 */
if (graph)
ftrace->tracer = "function_graph"; elseif (func)
ftrace->tracer = "function"; /* Otherwise, the default tracer is used. */
pr_debug("%s tracer is used\n", ftrace->tracer);
}
/*
 * NOTE(review): fragment of the main trace-output pump; done, pollfd,
 * trace_fd, buf and the out_close_fd label live outside this chunk.
 * Pumps trace_pipe to stdout until a signal sets 'done'.
 */
while (!done) { if (poll(&pollfd, 1, -1) < 0) break;
if (pollfd.revents & POLLIN) { int n = read(trace_fd, buf, sizeof(buf)); if (n < 0) break; if (fwrite(buf, n, 1, stdout) != 1) break; /* flush output since stdout is in full buffering mode due to pager */
fflush(stdout);
}
}
/* stop tracing before draining what is left in the buffer */
write_tracing_file("tracing_on", "0");
if (workload_exec_errno) { constchar *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf)); /* flush stdout first so below error msg appears at the end. */
fflush(stdout);
pr_err("workload failed: %s\n", emsg); goto out_close_fd;
}
/* read remaining buffer contents */ while (true) { int n = read(trace_fd, buf, sizeof(buf)); if (n <= 0) break; if (fwrite(buf, n, 1, stdout) != 1) break;
}
/*
 * Parse function_graph tracer output in buf (len bytes, possibly ending
 * mid-line) and accumulate per-latency-bucket counts into buckets[].
 * Partial lines are carried over in linebuf between calls.
 * NOTE(review): the function's final closing brace appears truncated
 * from this chunk (only the for-loop's brace is visible).
 */
staticvoid make_histogram(struct perf_ftrace *ftrace, int buckets[], char *buf, size_t len, char *linebuf)
{ int min_latency = ftrace->min_latency; int max_latency = ftrace->max_latency; unsignedint bucket_num = ftrace->bucket_num; char *p, *q; char *unit; double num; int i;
/* ensure NUL termination */
buf[len] = '\0';
/* handle data line by line */ for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) {
*q = '\0'; /* move it to the line buffer */
strcat(linebuf, p);
/*
 * parse trace output to get function duration like in
 *
 * # tracer: function_graph
 * #
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 *  1) + 10.291 us   |  do_filp_open();
 *  1)   4.889 us    |  do_filp_open();
 *  1)   6.086 us    |  do_filp_open();
 *
 */
if (linebuf[0] == '#') goto next;
/* ignore CPU */
p = strchr(linebuf, ')'); if (p == NULL)
p = linebuf;
while (*p && !isdigit(*p) && (*p != '|'))
p++;
/* no duration */ if (*p == '\0' || *p == '|') goto next;
num = strtod(p, &unit); if (!unit || strncmp(unit, " us", 3)) goto next;
/* histogram unit is ns when use_nsec is set; duration text is in us */
if (ftrace->use_nsec)
num *= 1000;
i = 0; if (num < min_latency) goto do_inc;
num -= min_latency;
if (!ftrace->bucket_range) {
/* default mode: log2-spaced buckets */
i = log2(num); if (i < 0)
i = 0;
} else {
// Less than 1 unit (ms or ns), or, in the future,
// than the min latency desired.
if (num > 0) // 1st entry: [ 1 unit .. bucket_range units ]
i = num / ftrace->bucket_range + 1; if (num >= max_latency - min_latency)
i = bucket_num -1;
} if ((unsigned)i >= bucket_num)
i = bucket_num - 1;
num += min_latency;
do_inc:
buckets[i]++;
update_stats(&latency_stats, num);
next: /* empty the line buffer for the next output */
linebuf[0] = '\0';
}
/*
 * Print the latency histogram collected in buckets[].
 * NOTE(review): truncated — only the total computation and the empty-data
 * early return are visible in this chunk.
 */
staticvoid display_histogram(struct perf_ftrace *ftrace, int buckets[])
{ int min_latency = ftrace->min_latency; bool use_nsec = ftrace->use_nsec; unsignedint bucket_num = ftrace->bucket_num; unsignedint i; int total = 0; int bar_total = 46; /* to fit in 80 column */ char bar[] = "###############################################"; int bar_len;
for (i = 0; i < bucket_num; i++)
total += buckets[i];
if (total == 0) {
printf("No data found\n"); return;
}
/*
 * NOTE(review): fragment — the enclosing function's header is outside this
 * chunk.  It selects the function_graph tracer, opens trace_pipe and
 * returns the file descriptor.  The open() failure path below only prints
 * an error; the caller presumably checks fd < 0 — TODO confirm.
 *
 * force to use the function_graph tracer to track duration
 */
if (write_tracing_file("current_tracer", "function_graph") < 0) {
pr_err("failed to set current_tracer to function_graph\n"); return -1;
}
trace_file = get_tracing_instance_file("trace_pipe"); if (!trace_file) {
pr_err("failed to open trace_pipe\n"); return -1;
}
fd = open(trace_file, O_RDONLY); if (fd < 0)
pr_err("failed to open trace_pipe\n");
init_stats(&latency_stats);
put_tracing_file(trace_file); return fd;
}
/*
 * Kick off latency measurement; BPF-based path when requested.
 * NOTE(review): truncated — the non-BPF tail of this function is missing
 * from this chunk.
 */
staticint start_func_latency(struct perf_ftrace *ftrace)
{ if (ftrace->target.use_bpf) return perf_ftrace__latency_start_bpf(ftrace);
/*
 * Entry point for 'perf ftrace': parse subcommand options, validate the
 * latency bucket configuration and dispatch to the per-subcommand handler.
 * NOTE(review): truncated in this chunk — subcommand detection before the
 * option tables and everything after target__validate() is missing.
 */
int cmd_ftrace(int argc, constchar **argv)
{ int ret; int (*cmd_func)(struct perf_ftrace *) = NULL; struct perf_ftrace ftrace = {
.tracer = DEFAULT_TRACER,
}; conststruct option common_options[] = {
OPT_STRING('p', "pid", &ftrace.target.pid, "pid", "Trace on existing process id"), /* TODO: Add short option -t after -t/--tracer can be removed. */
OPT_STRING(0, "tid", &ftrace.target.tid, "tid", "Trace on existing thread id (exclusive to --pid)"),
OPT_INCR('v', "verbose", &verbose, "Be more verbose"),
OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide, "System-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu", "List of cpus to monitor"),
OPT_END()
}; conststruct option ftrace_options[] = {
OPT_STRING('t', "tracer", &ftrace.tracer, "tracer", "Tracer to use: function_graph(default) or function"),
OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]", "Show available functions to filter",
opt_list_avail_functions, "*"),
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", "Trace given functions using function tracer",
parse_filter_func),
OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", "Do not trace given functions", parse_filter_func),
OPT_CALLBACK(0, "func-opts", &ftrace, "options", "Function tracer options, available options: call-graph,irq-info",
parse_func_tracer_opts),
OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", "Trace given functions using function_graph tracer",
parse_filter_func),
OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", "Set nograph filter on given functions", parse_filter_func),
OPT_CALLBACK(0, "graph-opts", &ftrace, "options", "Graph tracer options, available options: args,retval,retval-hex,retaddr,nosleep-time,noirqs,verbose,thresh=,depth=",
parse_graph_tracer_opts),
OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
OPT_BOOLEAN(0, "inherit", &ftrace.inherit, "Trace children processes"),
OPT_INTEGER('D', "delay", &ftrace.target.initial_delay, "Number of milliseconds to wait before starting tracing after program start"),
OPT_PARENT(common_options),
}; conststruct option latency_options[] = {
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", "Show latency of given function", parse_filter_func),
OPT_CALLBACK('e', "events", &ftrace.event_pair, "event1,event2", "Show latency between the two events", parse_filter_event), #ifdef HAVE_BPF_SKEL
OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, "Use BPF to measure function latency"), #endif
OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec, "Use nano-second histogram"),
OPT_UINTEGER(0, "bucket-range", &ftrace.bucket_range, "Bucket range in ms or ns (-n/--use-nsec), default is log2() mode"),
OPT_UINTEGER(0, "min-latency", &ftrace.min_latency, "Minimum latency (1st bucket). Works only with --bucket-range."),
OPT_UINTEGER(0, "max-latency", &ftrace.max_latency, "Maximum latency (last bucket). Works only with --bucket-range."),
OPT_BOOLEAN(0, "hide-empty", &ftrace.hide_empty, "Hide empty buckets in the histogram"),
OPT_PARENT(common_options),
}; conststruct option profile_options[] = {
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", "Trace given functions using function tracer",
parse_filter_func),
OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", "Do not trace given functions", parse_filter_func),
OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", "Trace given functions using function_graph tracer",
parse_filter_func),
OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", "Set nograph filter on given functions", parse_filter_func),
OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
OPT_CALLBACK('s', "sort", &profile_sort, "key", "Sort result by key: total (default), avg, max, count, name.",
parse_sort_key),
OPT_CALLBACK(0, "graph-opts", &ftrace, "options", "Graph tracer options, available options: nosleep-time,noirqs,thresh=,depth=",
parse_graph_tracer_opts),
OPT_PARENT(common_options),
}; conststruct option *options = ftrace_options;
/* NOTE(review): surrounding subcommand-detection code is missing here —
 * the brace structure below is incomplete in this chunk. */
if (subcmd != PERF_FTRACE_NONE) {
argc--;
argv++;
}
} /* for backward compatibility */ if (subcmd == PERF_FTRACE_NONE)
subcmd = PERF_FTRACE_TRACE;
argc = parse_options(argc, argv, options, ftrace_usage,
PARSE_OPT_STOP_AT_NON_OPTION); if (argc < 0) {
ret = -EINVAL; goto out_delete_filters;
}
/* Make system wide (-a) the default target. */ if (!argc && target__none(&ftrace.target))
ftrace.target.system_wide = true;
switch (subcmd) { case PERF_FTRACE_TRACE:
cmd_func = __cmd_ftrace; break; case PERF_FTRACE_LATENCY: if (list_empty(&ftrace.filters) && list_empty(&ftrace.event_pair)) {
pr_err("Should provide a function or events to measure\n");
parse_options_usage(ftrace_usage, options, "T", 1);
parse_options_usage(NULL, options, "e", 1);
ret = -EINVAL; goto out_delete_filters;
} if (!list_empty(&ftrace.filters) && !list_empty(&ftrace.event_pair)) {
pr_err("Please specify either of function or events\n");
parse_options_usage(ftrace_usage, options, "T", 1);
parse_options_usage(NULL, options, "e", 1);
ret = -EINVAL; goto out_delete_filters;
} if (!list_empty(&ftrace.event_pair) && !ftrace.target.use_bpf) {
pr_err("Event processing needs BPF\n");
parse_options_usage(ftrace_usage, options, "b", 1);
parse_options_usage(NULL, options, "e", 1);
ret = -EINVAL; goto out_delete_filters;
} if (!ftrace.bucket_range && ftrace.min_latency) {
pr_err("--min-latency works only with --bucket-range\n");
parse_options_usage(ftrace_usage, options, "min-latency", /*short_opt=*/false);
ret = -EINVAL; goto out_delete_filters;
} if (ftrace.bucket_range && !ftrace.min_latency) { /* default min latency should be the bucket range */
ftrace.min_latency = ftrace.bucket_range;
} if (!ftrace.bucket_range && ftrace.max_latency) {
pr_err("--max-latency works only with --bucket-range\n");
parse_options_usage(ftrace_usage, options, "max-latency", /*short_opt=*/false);
ret = -EINVAL; goto out_delete_filters;
} if (ftrace.bucket_range && ftrace.max_latency &&
ftrace.max_latency < ftrace.min_latency + ftrace.bucket_range) { /* we need at least 1 bucket excluding min and max buckets */
pr_err("--max-latency must be larger than min-latency + bucket-range\n");
parse_options_usage(ftrace_usage, options, "max-latency", /*short_opt=*/false);
ret = -EINVAL; goto out_delete_filters;
} /* set default unless max_latency is set and valid */
ftrace.bucket_num = NUM_BUCKET; if (ftrace.bucket_range) { if (ftrace.max_latency)
ftrace.bucket_num = (ftrace.max_latency - ftrace.min_latency) /
ftrace.bucket_range + 2; else /* default max latency should depend on bucket range and num_buckets */
ftrace.max_latency = (NUM_BUCKET - 2) * ftrace.bucket_range +
ftrace.min_latency;
}
cmd_func = __cmd_latency; break; case PERF_FTRACE_PROFILE:
cmd_func = __cmd_profile; break; case PERF_FTRACE_NONE: default:
pr_err("Invalid subcommand\n");
ret = -EINVAL; goto out_delete_filters;
}
/* NOTE(review): truncated — error handling after target__validate() and
 * the rest of the function are missing from this chunk. */
ret = target__validate(&ftrace.target); if (ret) { char errbuf[512];
/*
 * NOTE(review): stray non-code text (a German website disclaimer) was
 * appended here, apparently by extraction tooling; it is unrelated to this
 * source file.  Translated: "The information on this website was compiled
 * with care to the best of our knowledge.  However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */