staticenum event_status read_page(int cpu)
{ struct buffer_data_read_page *bpage; struct ring_buffer_event *event; struct rb_page *rpage; unsignedlong commit; int page_size; int *entry; int ret; int inc; int i;
bpage = ring_buffer_alloc_read_page(buffer, cpu); if (IS_ERR(bpage)) return EVENT_DROPPED;
page_size = ring_buffer_subbuf_size_get(buffer);
ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1); if (ret >= 0) {
rpage = ring_buffer_read_page_data(bpage); /* The commit may have missed event flags set, clear them */
commit = local_read(&rpage->commit) & 0xfffff; for (i = 0; i < commit && !test_error ; i += inc) {
if (i >= (page_size - offsetof(struct rb_page, data))) {
TEST_ERROR(); break;
}
inc = -1;
event = (void *)&rpage->data[i]; switch (event->type_len) { case RINGBUF_TYPE_PADDING: /* failed writes may be discarded events */ if (!event->time_delta)
TEST_ERROR();
inc = event->array[0] + 4; break; case RINGBUF_TYPE_TIME_EXTEND:
inc = 8; break; case 0:
entry = ring_buffer_event_data(event); if (*entry != cpu) {
TEST_ERROR(); break;
}
read++; if (!event->array[0]) {
TEST_ERROR(); break;
}
inc = event->array[0] + 4; break; default:
entry = ring_buffer_event_data(event); if (*entry != cpu) {
TEST_ERROR(); break;
}
read++;
inc = ((event->type_len + 1) * 4);
} if (test_error) break;
if (ret < 0) return EVENT_DROPPED; return EVENT_FOUND;
}
staticvoid ring_buffer_consumer(void)
{ /* toggle between reading pages and events */
read_events ^= 1;
read = 0; /* * Continue running until the producer specifically asks to stop * and is ready for the completion.
*/ while (!READ_ONCE(reader_finish)) { int found = 1;
while (found && !test_error) { int cpu;
found = 0;
for_each_online_cpu(cpu) { enum event_status stat;
if (read_events)
stat = read_event(cpu); else
stat = read_page(cpu);
if (test_error) break;
if (stat == EVENT_FOUND)
found = 1;
}
}
/* Wait till the producer wakes us up when there is more data * available or when the producer wants us to finish reading.
*/
set_current_state(TASK_INTERRUPTIBLE); if (reader_finish) break;
/* * Hammer the buffer for 10 secs (this may * make the system stall)
*/
trace_printk("Starting ring buffer hammer\n");
start_time = ktime_get();
timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC); do { struct ring_buffer_event *event; int *entry; int i;
for (i = 0; i < write_iteration; i++) {
event = ring_buffer_lock_reserve(buffer, 10); if (!event) {
missed++;
} else {
hit++;
entry = ring_buffer_event_data(event);
*entry = smp_processor_id();
ring_buffer_unlock_commit(buffer);
}
}
end_time = ktime_get();
cnt++; if (consumer && !(cnt % wakeup_interval))
wake_up_process(consumer);
#ifndef CONFIG_PREEMPTION /* * If we are a non preempt kernel, the 10 seconds run will * stop everything while it runs. Instead, we will call * cond_resched and also add any time that was lost by a * reschedule. * * Do a cond resched at the same frequency we would wake up * the reader.
*/ if (cnt % wakeup_interval)
cond_resched(); #endif
} while (ktime_before(end_time, timeout) && !break_test());
trace_printk("End ring buffer hammer\n");
if (consumer) { /* Init both completions here to avoid races */
init_completion(&read_start);
init_completion(&read_done); /* the completions must be visible before the finish var */
smp_wmb();
reader_finish = 1;
wake_up_process(consumer);
wait_for_completion(&read_done);
}
if (!disable_reader) { if (consumer_fifo)
trace_printk("Running Consumer at SCHED_FIFO %s\n",
str_low_high(consumer_fifo == 1)); else
trace_printk("Running Consumer at nice: %d\n",
consumer_nice);
} if (producer_fifo)
trace_printk("Running Producer at SCHED_FIFO %s\n",
str_low_high(producer_fifo == 1)); else
trace_printk("Running Producer at nice: %d\n",
producer_nice);
/* Let the user know that the test is running at low priority */ if (!producer_fifo && !consumer_fifo &&
producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
trace_printk("WARNING!!! This test is running at lowest priority.\n");
/* Convert time from usecs to millisecs */
do_div(time, USEC_PER_MSEC); if (time)
hit /= (long)time; else
trace_printk("TIME IS ZERO??\n");
trace_printk("Entries per millisec: %ld\n", hit);
if (hit) { /* Calculate the average time in nanosecs */
avg = NSEC_PER_MSEC / hit;
trace_printk("%ld ns per entry\n", avg);
}
if (missed) { if (time)
missed /= (long)time;
trace_printk("Total iterations per millisec: %ld\n",
hit + missed);
/* it is possible that hit + missed will overflow and be zero */ if (!(hit + missed)) {
trace_printk("hit + missed overflowed and totalled zero!\n");
hit--; /* make it non zero */
}
/* Calculate the average time in nanosecs */
avg = NSEC_PER_MSEC / (hit + missed);
trace_printk("%ld ns per entry\n", avg);
}
}
/*
 * Consumer kthread body.  Each iteration signals read_start so the
 * producer knows the reader is ready, runs one consumer pass, then
 * sleeps until the producer wakes it for the next round.
 *
 * Note the ordering: set_current_state() comes before the break_test()
 * recheck so a wake_up_process() between the check and schedule() is
 * not lost.
 */
static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}
/*
 * Producer kthread body.  Each iteration resets the buffer, waits for
 * the consumer (if any) to signal readiness via read_start, runs one
 * producer hammer pass, then sleeps SLEEP_TIME seconds before the next
 * round.  Exits the loop early whenever break_test() fires.
 */
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}
/*
 * Module init: allocate the benchmark ring buffer and spawn the
 * consumer (unless disabled) and producer kthreads, then apply the
 * requested scheduling policy (SCHED_FIFO or nice level) to each.
 *
 * NOTE(review): the visible source referenced out_fail/out_kill labels
 * whose bodies were lost to truncation; the cleanup paths below restore
 * the standard kill-consumer / free-buffer unwind — confirm against the
 * upstream file.
 */
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 2)
			sched_set_fifo(consumer);
		else if (consumer_fifo == 1)
			sched_set_fifo_low(consumer);
		else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 2)
		sched_set_fifo(producer);
	else if (producer_fifo == 1)
		sched_set_fifo_low(producer);
	else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}