/* * This gets called in a loop recording the time it took to write * the tracepoint. What it writes is the time statistics of the last * tracepoint write. As there is nothing to write the first time * it simply writes "START". As the first write is cold cache and * the rest is hot, we save off that time in bm_first and it is * reported as "first", which is shown in the second write to the * tracepoint. The "first" field is written within the statics from * then on but never changes.
*/ staticvoid trace_do_benchmark(void)
{
u64 start;
u64 stop;
u64 delta;
u64 stddev;
u64 seed;
u64 last_seed; unsignedint avg; unsignedint std = 0;
/* Only run if the tracepoint is actually active */ if (!trace_benchmark_event_enabled() || !tracing_is_on()) return;
/* * The first read is cold cached, keep it separate from the * other calculations.
*/ if (bm_cnt == 1) {
bm_first = delta;
scnprintf(bm_str, BENCHMARK_EVENT_STRLEN, "first=%llu [COLD CACHED]", bm_first); return;
}
bm_last = delta;
if (delta > bm_max)
bm_max = delta; if (!bm_min || delta < bm_min)
bm_min = delta;
/* * When bm_cnt is greater than UINT_MAX, it breaks the statistics * accounting. Freeze the statistics when that happens. * We should have enough data for the avg and stddev anyway.
*/ if (bm_cnt > UINT_MAX) {
scnprintf(bm_str, BENCHMARK_EVENT_STRLEN, "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev); return;
}
if (stddev > 0) { int i = 0; /* * stddev is the square of standard deviation but * we want the actually number. Use the average * as our seed to find the std. * * The next try is: * x = (x + N/x) / 2 * * Where N is the squared number to find the square * root of.
*/
seed = avg; do {
last_seed = seed;
seed = stddev; if (!last_seed) break;
seed = div64_u64(seed, last_seed);
seed += last_seed;
do_div(seed, 2);
} while (i++ < 10 && last_seed != seed);
/*
 * Benchmark thread main loop: repeatedly time the benchmark
 * tracepoint until kthread_stop() is called on this thread.
 * Always returns 0 (the thread exit code is unused).
 */
static int benchmark_event_kthread(void *arg)
{
	/* sleep a bit to make sure the tracepoint gets activated */
	msleep(100);

	while (!kthread_should_stop()) {

		trace_do_benchmark();

		/*
		 * We don't go to sleep, but let others run as well.
		 * This is basically a "yield()" to let any task that
		 * wants to run, schedule in, but if the CPU is idle,
		 * we'll keep burning cycles.
		 *
		 * Note the tasks_rcu_qs() version of cond_resched() will
		 * notify synchronize_rcu_tasks() that this thread has
		 * passed a quiescent state for rcu_tasks. Otherwise
		 * this thread will never voluntarily schedule which would
		 * block synchronize_rcu_tasks() indefinitely.
		 */
		cond_resched_tasks_rcu_qs();
	}

	return 0;
}
/*
 * When the benchmark tracepoint is enabled, it calls this
 * function and the thread that calls the tracepoint is created.
 *
 * Returns 0 on success, -EBUSY if the benchmark was disallowed on
 * the kernel command line, or the kthread_run() error code.
 */
int trace_benchmark_reg(void)
{
	struct task_struct *thread;

	if (!ok_to_run) {
		pr_warn("trace benchmark cannot be started via kernel command line\n");
		return -EBUSY;
	}

	thread = kthread_run(benchmark_event_kthread, NULL, "event_benchmark");
	if (IS_ERR(thread)) {
		/*
		 * Do not store the ERR_PTR in bm_event_thread: the
		 * unregister path only checks the global for NULL, so a
		 * stored ERR_PTR would later be handed to kthread_stop().
		 */
		pr_warn("trace benchmark failed to create kernel thread\n");
		return PTR_ERR(thread);
	}

	bm_event_thread = thread;
	return 0;
}
/* * When the benchmark tracepoint is disabled, it calls this * function and the thread that calls the tracepoint is deleted * and all the numbers are reset.
*/ void trace_benchmark_unreg(void)
{ if (!bm_event_thread) return;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.