/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size */
	int		used;		/* number of elements used */
	bool		sorted;		/* if elements are sorted */
	bool		whitelist;	/* if list is a blacklist or whitelist */
} report_filterlist;
static DEFINE_RAW_SPINLOCK(report_filterlist_lock);
/* * The microbenchmark allows benchmarking KCSAN core runtime only. To run * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the * debugfs file. This will not generate any conflicts, and tests fast-path only.
*/ static noinline void microbenchmark(unsignedlong iters)
{ conststruct kcsan_ctx ctx_save = current->kcsan_ctx; constbool was_enabled = READ_ONCE(kcsan_enabled);
u64 cycles;
/* We may have been called from an atomic region; reset context. */
memset(¤t->kcsan_ctx, 0, sizeof(current->kcsan_ctx)); /* * Disable to benchmark fast-path for all accesses, and (expected * negligible) call into slow-path, but never set up watchpoints.
*/
WRITE_ONCE(kcsan_enabled, false);
pr_info("%s begin | iters: %lu\n", __func__, iters);
/*
 * NOTE(review): fragment — the enclosing function's signature (it takes a
 * func_addr and returns bool under report_filterlist_lock) and its 'out:'
 * unlock/return tail are not visible in this chunk; confirm against the
 * original source.
 */
	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
		return false;
	func_addr -= offset; /* Get function start */

	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
	if (report_filterlist.used == 0)
		goto out;

	/* Sort array if it is unsorted, and then do a binary search. */
	if (!report_filterlist.sorted) {
		sort(report_filterlist.addrs, report_filterlist.used,
		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
		report_filterlist.sorted = true;
	}
	ret = !!bsearch(&func_addr, report_filterlist.addrs,
			report_filterlist.used, sizeof(unsigned long),
			cmp_filterlist_addrs);
	/* A whitelist excludes everything *not* on the list. */
	if (report_filterlist.whitelist)
		ret = !ret;
/*
 * NOTE(review): fragment — the enclosing insert function's signature, local
 * declarations (addr, flags, new_size, new_addrs, delay_free) and its
 * unlock/delayed-kfree tail are not visible in this chunk; confirm against
 * the original source.
 */
	if (!addr) {
		pr_err("could not find function: '%s'\n", func);
		return -ENOENT;
	}

retry_alloc:
	/*
	 * Check if we need an allocation, and re-validate under the lock. Since
	 * the report_filterlist_lock is a raw, cannot allocate under the lock.
	 */
	if (data_race(report_filterlist.used == report_filterlist.size)) {
		new_size = (report_filterlist.size ?: 4) * 2;
		delay_free = new_addrs = kmalloc_array(new_size, sizeof(unsigned long), GFP_KERNEL);
		if (!new_addrs)
			return -ENOMEM;
	}

	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
	if (report_filterlist.used == report_filterlist.size) {
		/* Check we pre-allocated enough, and retry if not. */
		if (report_filterlist.used >= new_size) {
			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
			kfree(new_addrs); /* kfree(NULL) is safe */
			delay_free = new_addrs = NULL;
			goto retry_alloc;
		}

		if (report_filterlist.used)
			memcpy(new_addrs, report_filterlist.addrs,
			       report_filterlist.used * sizeof(unsigned long));
		delay_free = report_filterlist.addrs; /* free the old list */
		report_filterlist.addrs = new_addrs; /* switch to the new list */
		report_filterlist.size = new_size;
	}

	/* Note: deduplicating should be done in userspace. */
	report_filterlist.addrs[report_filterlist.used++] = addr;
	/* New entry invalidates sort order; re-sorted lazily on next lookup. */
	report_filterlist.sorted = false;
/*
 * NOTE(review): extraction residue — the following German website disclaimer
 * was appended by whatever tool produced this chunk and is not part of the
 * source file. Translation: "The information on this web page was compiled
 * carefully to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax display is still experimental." Remove before
 * building.
 */