// SPDX-License-Identifier: GPL-2.0+
//
// Torture test for smp_call_function() and friends.
//
// Copyright (C) Facebook, 2020.
//
// Author: Paul E. McKenney <paulmck@kernel.org>
// Data for random primitive selection.
#define SCF_PRIM_RESCHED	0
#define SCF_PRIM_SINGLE		1
#define SCF_PRIM_SINGLE_RPC	2
#define SCF_PRIM_MANY		3
#define SCF_PRIM_ALL		4
#define SCF_NPRIMS		8 // Need wait and no-wait versions of each,
				  //  except for SCF_PRIM_RESCHED and
				  //  SCF_PRIM_SINGLE_RPC.
// Periodically prints torture statistics, if periodic statistics printing
// was specified via the stat_interval module parameter.
//
// Kthread body: sleeps stat_interval seconds between reports and exits
// cleanly when the torture framework asks it to stop.  Always returns 0.
static int
scf_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("scf_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		scf_torture_stats_print();
		// Absorb shutdown requests so stats printing cannot block
		// an in-progress torture shutdown.
		torture_shutdown_absorb("scf_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("scf_torture_stats");
	return 0;
}
// Add a primitive to the scf_sel_array[]. staticvoid scf_sel_add(unsignedlong weight, int prim, bool wait)
{ struct scf_selector *scfsp = &scf_sel_array[scf_sel_array_len];
// If no weight, if array would overflow, if computing three-place // percentages would overflow, or if the scf_prim_name[] array would // overflow, don't bother. In the last three two cases, complain. if (!weight ||
WARN_ON_ONCE(scf_sel_array_len >= ARRAY_SIZE(scf_sel_array)) ||
WARN_ON_ONCE(0 - 100000 * weight <= 100000 * scf_sel_totweight) ||
WARN_ON_ONCE(prim >= ARRAY_SIZE(scf_prim_name))) return;
scf_sel_totweight += weight;
scfsp->scfs_weight = scf_sel_totweight;
scfsp->scfs_prim = prim;
scfsp->scfs_wait = wait;
scf_sel_array_len++;
}
// Dump out weighting percentages for scf_prim_name[] array. staticvoid scf_sel_dump(void)
{ int i; unsignedlong oldw = 0; struct scf_selector *scfsp; unsignedlong w;
for (i = 0; i < scf_sel_array_len; i++) {
scfsp = &scf_sel_array[i];
w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;
pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,
scf_prim_name[scfsp->scfs_prim],
scfsp->scfs_wait ? "wait" : "nowait");
oldw = scfsp->scfs_weight;
}
}
// Randomly pick a primitive and wait/nowait, based on weightings. staticstruct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
{ int i; unsignedlong w = torture_random(trsp) % (scf_sel_totweight + 1);
for (i = 0; i < scf_sel_array_len; i++) if (scf_sel_array[i].scfs_weight >= w) return &scf_sel_array[i];
WARN_ON_ONCE(1); return &scf_sel_array[0];
}
// Update statistics and occasionally burn up mass quantities of CPU time, // if told to do so via scftorture.longwait. Otherwise, occasionally burn // a little bit. staticvoid scf_handler(void *scfc_in)
{ int i; int j; unsignedlong r = torture_random(this_cpu_ptr(&scf_torture_rand)); struct scf_check *scfcp = scfc_in;
if (likely(scfcp)) {
WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers. if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
atomic_inc(&n_mb_in_errs);
}
this_cpu_inc(scf_invoked_count); if (longwait <= 0) { if (!(r & 0xffc0)) {
udelay(r & 0x3f); goto out;
}
} if (r & 0xfff) goto out;
r = (r >> 12); if (longwait <= 0) {
udelay((r & 0xff) + 1); goto out;
}
r = r % longwait + 1; for (i = 0; i < r; i++) { for (j = 0; j < 1000; j++) {
udelay(1000);
cpu_relax();
}
}
out: if (unlikely(!scfcp)) return; if (scfcp->scfc_wait) {
WRITE_ONCE(scfcp->scfc_out, true); if (scfcp->scfc_rpc)
complete(&scfcp->scfc_completion);
} else {
scf_add_to_free_list(scfcp);
}
}
// As above, but check for correct CPU. staticvoid scf_handler_1(void *scfc_in)
{ struct scf_check *scfcp = scfc_in;
if (likely(scfcp) && WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu, "%s: Wanted CPU %d got CPU %d\n", __func__, scfcp->scfc_cpu, smp_processor_id())) {
atomic_inc(&n_errs);
}
scf_handler(scfcp);
}
// Randomly do an smp_call_function*() invocation. staticvoid scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{ bool allocfail = false;
uintptr_t cpu; int ret = 0; struct scf_check *scfcp = NULL; struct scf_selector *scfsp = scf_sel_rand(trsp);
if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC); if (!scfcp) {
WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
atomic_inc(&n_alloc_errs);
allocfail = true;
} else {
scfcp->scfc_cpu = -1;
scfcp->scfc_wait = scfsp->scfs_wait;
scfcp->scfc_out = false;
scfcp->scfc_rpc = false;
}
} if (use_cpus_read_lock)
cpus_read_lock(); else
preempt_disable(); switch (scfsp->scfs_prim) { case SCF_PRIM_RESCHED: if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
cpu = torture_random(trsp) % nr_cpu_ids;
scfp->n_resched++;
resched_cpu(cpu);
this_cpu_inc(scf_invoked_count);
} break; case SCF_PRIM_SINGLE:
cpu = torture_random(trsp) % nr_cpu_ids; if (scfsp->scfs_wait)
scfp->n_single_wait++; else
scfp->n_single++; if (scfcp) {
scfcp->scfc_cpu = cpu;
barrier(); // Prevent race-reduction compiler optimizations.
scfcp->scfc_in = true;
}
ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait); if (ret) { if (scfsp->scfs_wait)
scfp->n_single_wait_ofl++; else
scfp->n_single_ofl++;
scf_add_to_free_list(scfcp);
scfcp = NULL;
} break; case SCF_PRIM_SINGLE_RPC: if (!scfcp) break;
cpu = torture_random(trsp) % nr_cpu_ids;
scfp->n_single_rpc++;
scfcp->scfc_cpu = cpu;
scfcp->scfc_wait = true;
init_completion(&scfcp->scfc_completion);
scfcp->scfc_rpc = true;
barrier(); // Prevent race-reduction compiler optimizations.
scfcp->scfc_in = true;
ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0); if (!ret) { if (use_cpus_read_lock)
cpus_read_unlock(); else
preempt_enable();
wait_for_completion(&scfcp->scfc_completion); if (use_cpus_read_lock)
cpus_read_lock(); else
preempt_disable();
} else {
scfp->n_single_rpc_ofl++;
scf_add_to_free_list(scfcp);
scfcp = NULL;
} break; case SCF_PRIM_MANY: if (scfsp->scfs_wait)
scfp->n_many_wait++; else
scfp->n_many++; if (scfcp) {
barrier(); // Prevent race-reduction compiler optimizations.
scfcp->scfc_in = true;
}
smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait); break; case SCF_PRIM_ALL: if (scfsp->scfs_wait)
scfp->n_all_wait++; else
scfp->n_all++; if (scfcp) {
barrier(); // Prevent race-reduction compiler optimizations.
scfcp->scfc_in = true;
}
smp_call_function(scf_handler, scfcp, scfsp->scfs_wait); break; default:
WARN_ON_ONCE(1); if (scfcp)
scfcp->scfc_out = true;
} if (scfcp && scfsp->scfs_wait) { if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
!scfcp->scfc_out)) {
pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
atomic_inc(&n_mb_out_errs); // Leak rather than trash!
} else {
scf_add_to_free_list(scfcp);
}
barrier(); // Prevent race-reduction compiler optimizations.
} if (use_cpus_read_lock)
cpus_read_unlock(); else
preempt_enable(); if (allocfail)
schedule_timeout_idle((1 + longwait) * HZ); // Let no-wait handlers complete. elseif (!(torture_random(trsp) & 0xfff))
schedule_timeout_uninterruptible(1);
}
// NOTE(review): The text below appears to fuse the *start* of the
// scftorture_invoker() kthread (CPU affinitization and the n_started
// startup barrier) with the *tail* of a separate cleanup routine: from
// the WRITE_ONCE(scfdone, true) line onward, the code stops kthreads,
// frees scf_stats_p, and prints end-of-test results, and it uses a loop
// variable `i` that is never declared in the visible scope.  The
// invoker's main test loop and the cleanup function's header seem to be
// missing from this view.  Left byte-identical; reconstruct against the
// upstream kernel/scftorture.c before building -- TODO confirm.
// SCF test kthread. Repeatedly does calls to members of the // smp_call_function() family of functions. staticint scftorture_invoker(void *arg)
{ int cpu; int curcpu;
DEFINE_TORTURE_RANDOM(rand); struct scf_statistics *scfp = (struct scf_statistics *)arg; bool was_offline = false;
VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
// Bind this invoker to its designated CPU.
cpu = scfp->cpu % nr_cpu_ids;
WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
set_user_nice(current, MAX_NICE); if (holdoff)
schedule_timeout_interruptible(holdoff * HZ);
VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());
// Make sure that the CPU is affinitized appropriately during testing.
curcpu = raw_smp_processor_id();
WARN_ONCE(curcpu != cpu, "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
__func__, scfp->cpu, curcpu, nr_cpu_ids);
// Startup barrier: the last thread to decrement n_started spins until
// all invoker threads have checked in (or the test is stopped early).
if (!atomic_dec_return(&n_started)) while (atomic_read_acquire(&n_started)) { if (torture_must_stop()) {
VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu); goto end;
}
schedule_timeout_uninterruptible(1);
}
// NOTE(review): the lines from here down look like cleanup code from a
// different function (note the undeclared `i`) -- TODO confirm.
WRITE_ONCE(scfdone, true); if (nthreads && scf_stats_p) for (i = 0; i < nthreads; i++)
torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task); else goto end;
smp_call_function(scf_cleanup_handler, NULL, 1);
torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
scf_torture_stats_print(); // -After- the stats thread is stopped!
kfree(scf_stats_p); // -After- the last stats print has completed!
scf_stats_p = NULL;
for (i = 0; i < nr_cpu_ids; i++)
scf_cleanup_free_list(i);
// Report overall verdict based on accumulated error counters.
if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
scftorture_print_module_parms("End of test: FAILURE"); elseif (torture_onoff_failures())
scftorture_print_module_parms("End of test: LOCK_HOTPLUG"); else
scftorture_print_module_parms("End of test: SUCCESS");
/*
 * NOTE(review): Stray non-code text below -- a German-language website
 * disclaimer unrelated to this kernel module, apparently pasted in by
 * accident.  Wrapped in a comment so it cannot break compilation; it
 * should most likely be deleted outright.  Translation: "The information
 * on this website was carefully compiled to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed.  Remark: the colored syntax
 * rendering is still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.
 */