/* Common ftrace options */
xbc_node_for_each_array_value(node, "options", anode, p) {
	if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) {
		pr_err("String is too long: %s\n", p);
		continue;
	}

	if (trace_set_options(tr, buf) < 0)
		pr_err("Failed to set option: %s\n", buf);
}

/* "tracing_on" toggles the per-instance tracing switch. */
p = xbc_node_find_value(node, "tracing_on", NULL);
if (p && *p != '\0') {
	if (kstrtoul(p, 10, &v))
		pr_err("Failed to set tracing on: %s\n", p);
	/* NOTE(review): on kstrtoul() failure 'v' keeps its prior value — confirm intended. */
	if (v)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);
}

/* "trace_clock" selects the trace timestamp clock source. */
p = xbc_node_find_value(node, "trace_clock", NULL);
if (p && *p != '\0') {
	if (tracing_set_clock(tr, p) < 0)
		pr_err("Failed to set trace clock: %s\n", p);
}

/* "buffer_size" resizes the ring buffer (suffixes handled by memparse). */
p = xbc_node_find_value(node, "buffer_size", NULL);
if (p && *p != '\0') {
	v = memparse(p, NULL);
	if (v < PAGE_SIZE)
		pr_err("Buffer size is too small: %s\n", p);
	if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
		pr_err("Failed to resize trace buffer to %s\n", p);
}

/* "cpumask" restricts tracing to the given set of CPUs. */
p = xbc_node_find_value(node, "cpumask", NULL);
if (p && *p != '\0') {
	cpumask_var_t new_mask;

	if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		if (cpumask_parse(p, new_mask) < 0 ||
		    tracing_set_cpumask(tr, new_mask) < 0)
			pr_err("Failed to set new CPU mask %s\n", p);
		free_cpumask_var(new_mask);
	}
}
}
xbc_node_for_each_subkey(hnode, node) {
	p = xbc_node_get_data(node);
	/* All digit-started nodes should be instances. */
	if (!isdigit(p[0]))
		continue;
	ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param);
	if (ret < 0)
		break;
}
/* A bare (non-numbered) handler node is treated as a single instance. */
if (xbc_node_find_subkey(hnode, param))
	ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param);
xbc_node_for_each_subkey(hnode, node) {
	p = xbc_node_get_data(node);
	/* All digit-started nodes should be instances. */
	if (!isdigit(p[0]))
		continue;
	if (trace_boot_compose_hist_cmd(node, buf, size) == 0) {
		/* Keep a copy for the error message: trigger parsing mutates buf. */
		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return;
		if (trigger_process_regex(file, buf) < 0)
			pr_err("Failed to apply hist trigger: %s\n", tmp);
		kfree(tmp);
	}
}
/* A bare "keys" subkey means the node itself describes one histogram. */
if (xbc_node_find_subkey(hnode, "keys")) {
	if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) {
		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return;
		if (trigger_process_regex(file, buf) < 0)
			pr_err("Failed to apply hist trigger: %s\n", tmp);
		kfree(tmp);
	}
}
} #else staticvoid __init
trace_boot_init_histograms(struct trace_event_file *file, struct xbc_node *hnode, char *buf, size_t size)
{ /* do nothing */
} #endif
group = xbc_node_get_data(gnode);
event = xbc_node_get_data(enode);
if (!strcmp(group, "kprobes")) if (trace_boot_add_kprobe_event(enode, event) < 0) return; if (!strcmp(group, "synthetic")) if (trace_boot_add_synth_event(enode, event) < 0) return;
mutex_lock(&event_mutex);
file = find_event_file(tr, group, event); if (!file) {
pr_err("Failed to find event: %s:%s\n", group, event); goto out;
}
p = xbc_node_find_value(enode, "filter", NULL); if (p && *p != '\0') { if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
pr_err("filter string is too long: %s\n", p); elseif (apply_event_filter(file, buf) < 0)
pr_err("Failed to apply filter: %s\n", buf);
}
if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
xbc_node_for_each_array_value(enode, "actions", anode, p) { if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0)
pr_err("action string is too long: %s\n", p); elseif (trigger_process_regex(file, buf) < 0)
pr_err("Failed to apply an action: %s\n", p);
}
anode = xbc_node_find_subkey(enode, "hist"); if (anode)
trace_boot_init_histograms(file, anode, buf, ARRAY_SIZE(buf));
} elseif (xbc_node_find_value(enode, "actions", NULL))
pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");
if (xbc_node_find_value(enode, "enable", NULL)) { if (trace_event_enable_disable(file, 1, 0) < 0)
pr_err("Failed to enable event node: %s:%s\n",
group, event);
}
out:
mutex_unlock(&event_mutex);
}
/* "tracer" selects the current tracer for this instance. */
p = xbc_node_find_value(node, "tracer", NULL);
if (p && *p != '\0') {
	if (tracing_set_tracer(tr, p) < 0)
		pr_err("Failed to set given tracer: %s\n", p);
}

/* Since tracer can free snapshot buffer, allocate snapshot here. */
if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
	if (tracing_alloc_snapshot_instance(tr) < 0)
		pr_err("Failed to allocate snapshot buffer\n");
}
}
return 0;
}

/*
 * Start tracing at the end of core-initcall, so that it starts tracing
 * from the beginning of postcore_initcall.
 */
core_initcall_sync(trace_boot_init);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.