MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney and Josh Triplett ");
// Bits for ->extendables field, extendables param, and related definitions.
// NOTE: each directive restored to its own line; the collapsed form placed
// #defines after a // comment, which removed them from the preprocessed source.
#define RCUTORTURE_RDR_SHIFT_1	8	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_1	(0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	16	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_2	(0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	// Extend readers by disabling bh.
#define RCUTORTURE_RDR_IRQ	0x02	//  ... disabling interrupts.
#define RCUTORTURE_RDR_PREEMPT	0x04	//  ... disabling preemption.
#define RCUTORTURE_RDR_RBH	0x08	//  ... rcu_read_lock_bh().
#define RCUTORTURE_RDR_SCHED	0x10	//  ... rcu_read_lock_sched().
#define RCUTORTURE_RDR_RCU_1	0x20	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_RCU_2	0x40	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_UPDOWN	0x80	//  ... up-read from task, down-read from timer.
					// Note: Manual start, automatic end.
#define RCUTORTURE_RDR_NBITS	8	// Number of bits defined above.
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
					// Intentionally omit RCUTORTURE_RDR_UPDOWN.
#define RCUTORTURE_RDR_ALLBITS	\
	(RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
	 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
/*
 * Module parameters controlling the rcutorture run.  Each torture_param()
 * declares one module_param plus its MODULE_PARM_DESC.  Fix: the
 * gp_cond_exp_full description read "full-stateexpedited" (missing space).
 */
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false, "Use conditional/async full-state expedited GP wait primitives");
torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ, "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
torture_param(int, gp_cond_wi_exp, 128, "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ, "Wait interval for normal polled grace periods, us (default 16 jiffies)");
torture_param(int, gp_poll_wi_exp, 128, "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
/* Which RCU flavor to torture; selects the rcu_torture_ops instance. */
static char *torture_type = "rcu";	/* was "staticchar": fused keyword broke compilation */
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
/* * Stop aggressive CPU-hog tests a bit before the end of the test in order * to avoid interfering with test shutdown.
*/ staticbool shutdown_time_arrived(void)
{ return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}
/*
 * Free an element to the rcu_tortures pool, bumping the free counter and
 * appending the element to the freelist under rcu_torture_lock.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);	/* Statistics only. */

	/* The freelist is shared with rcu_torture_alloc(); serialize. */
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 * (Reflowed: the declaration had been collapsed onto single lines with
 * fused type keywords such as "unsignedlong", which does not compile.)
 */
struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);	// lockdep.
	int (*readlock_nesting)(void);	// actual nesting, if available, -1 if not.
	int (*down_read)(void);
	void (*up_read)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1,
				   struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	int poll_active;
	int poll_active_full;
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	bool (*reader_blocked)(void);
	unsigned long long (*gather_gp_seqs)(void);
	void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
	void (*set_gpwrap_lag)(unsigned long lag);
	int (*get_gpwrap_count)(int cpu);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	int start_poll_irqsoff;
	int have_up_down;
	const char *name;
};
/*
 * We want a short delay sometimes to make a reader delay the grace
 * period, and we want a long delay occasionally to trigger
 * force_quiescent_state.
 */
/* Report the current RCU reader nesting depth, or -1 if it is unavailable. */
static int rcu_torture_readlock_nesting(void)
{
	/* Preemptible RCU tracks nesting directly. */
	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
		return rcu_preempt_depth();

	/* Otherwise fall back on the preempt-disable count, when kept. */
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return preempt_count() & PREEMPT_MASK;

	return -1;	/* Nesting depth not tracked in this configuration. */
}
/* * Update callback in the pipe. This should be invoked after a grace period.
*/ staticbool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{ int i; struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
if (rtrcp) {
WRITE_ONCE(rp->rtort_chkp, NULL);
smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
}
i = rp->rtort_pipe_count; if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
WRITE_ONCE(rp->rtort_pipe_count, i + 1);
ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count); if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
rp->rtort_mbtest = 0; returntrue;
} returnfalse;
}
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives: the caller's just-removed element (if any) joins the removed
 * list, then every fully-aged element is freed.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *cur;
	struct rcu_torture *nxt;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);

	/* _safe variant: entries may be unlinked while walking. */
	list_for_each_entry_safe(cur, nxt, &rcu_torture_removed, rtort_free) {
		if (!rcu_torture_pipe_update_one(cur))
			continue;
		list_del(&cur->rtort_free);
		rcu_torture_free(cur);
	}
}
if (torture_must_stop_irq()) { /* Test is ending, just drop callbacks on the floor. */ /* The next initialization will pick up the pieces. */ return;
} if (rcu_torture_pipe_update_one(rp))
rcu_torture_free(rp); else
cur_ops->deferred_free(rp);
}
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);	/* Invoked with no grace period at all. */
}
/* Deliberately-broken synchronous grace period: waits for nothing at all. */
static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}
/* Deliberately-broken call_rcu(): invokes the callback immediately. */
static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}
/* * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. * This implementation does not work well with CPU hotplug nor * with rcutorture's shuffling.
*/
staticvoid synchronize_rcu_trivial(void)
{ int cpu;
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

/* Saved RT-throttling runtime; -1 means throttling has not been disabled. */
static int old_rt_runtime = -1;	/* was "staticint": fused keyword broke compilation */
staticvoid rcu_torture_disable_rt_throttle(void)
{ /* * Disable RT throttling so that rcutorture's boost threads don't get * throttled. Only possible if rcutorture is built-in otherwise the * user should manually do this by setting the sched_rt_period_us and * sched_rt_runtime sysctls.
*/ if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) return;
if (end - *start > mininterval) { // Recheck after checking time to avoid false positives.
smp_mb(); // Time check before grace-period check. if (cur_ops->poll_gp_state(gp_state)) returnfalse; // passed, though perhaps just barely if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { // At most one persisted message per boost test.
j = jiffies;
lp = READ_ONCE(last_persist); if (time_after(j, lp + mininterval) &&
cmpxchg(&last_persist, lp, j) == lp) { if (cpu < 0)
pr_info("Boost inversion persisted: QS from all CPUs\n"); else
pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
} returnfalse; // passed on a technicality
}
VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
n_rcu_torture_boost_failure++; if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
current->rt_priority, gp_state, end - *start);
cur_ops->gp_kthread_dbg(); // Recheck after print to flag grace period ending during splat.
gp_done = cur_ops->poll_gp_state(gp_state);
pr_info("Boost inversion: GP %lu %s.\n", gp_state,
gp_done ? "ended already" : "still pending");
if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) {
VERBOSE_TOROUT_STRING("rcu_torture_boost started");
} else {
VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period"); while (time_before(jiffies, booststarttime)) {
schedule_timeout_idle(HZ); if (kthread_should_stop()) goto cleanup;
}
VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period");
}
/* Set real-time priority. */
sched_set_fifo_low(current);
/* Each pass through the following loop does one boost-test cycle. */ do { bool failed = false; // Test failed already in this test interval bool gp_initiated = false;
if (kthread_should_stop()) goto checkwait;
/* Wait for the next test interval. */
oldstarttime = READ_ONCE(boost_starttime); while (time_before(jiffies, oldstarttime)) {
schedule_timeout_interruptible(oldstarttime - jiffies); if (stutter_wait("rcu_torture_boost"))
sched_set_fifo_low(current); if (torture_must_stop()) goto checkwait;
}
// Do one boost-test interval.
endtime = oldstarttime + test_boost_duration * HZ; while (time_before(jiffies, endtime)) { // Has current GP gone too long? if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
failed = rcu_torture_boost_failed(gp_state, &gp_state_time); // If we don't have a grace period in flight, start one. if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
gp_state = cur_ops->start_gp_poll();
gp_initiated = true;
gp_state_time = jiffies;
} if (stutter_wait("rcu_torture_boost")) {
sched_set_fifo_low(current); // If the grace period already ended, // we don't know when that happened, so // start over. if (cur_ops->poll_gp_state(gp_state))
gp_initiated = false;
} if (torture_must_stop()) goto checkwait;
}
// In case the grace period extended beyond the end of the loop. if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
rcu_torture_boost_failed(gp_state, &gp_state_time);
/* * Set the start time of the next test interval. * Yes, this is vulnerable to long delays, but such * delays simply cause a false negative for the next * interval. Besides, we are running at RT priority, * so delays should be relatively rare.
*/ while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { if (mutex_trylock(&boost_mutex)) { if (oldstarttime == boost_starttime) {
WRITE_ONCE(boost_starttime,
jiffies + test_boost_interval * HZ);
n_rcu_torture_boosts++;
}
mutex_unlock(&boost_mutex); break;
}
schedule_timeout_uninterruptible(HZ / 20);
}
/* Go do the stutter. */
checkwait: if (stutter_wait("rcu_torture_boost"))
sched_set_fifo_low(current);
} while (!torture_must_stop());
cleanup: /* Clean up and exit. */ while (!kthread_should_stop()) {
torture_shutdown_absorb("rcu_torture_boost");
schedule_timeout_uninterruptible(HZ / 20);
}
torture_kthread_stopping("rcu_torture_boost"); return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long resume_at;
	int burst_left;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		/* Idle between bursts for fqs_stutter seconds. */
		resume_at = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, resume_at) && !kthread_should_stop())
			schedule_timeout_interruptible(HZ / 20);

		/* One burst: hammer ->fqs() every fqs_holdoff us for fqs_duration us. */
		for (burst_left = fqs_duration;
		     burst_left > 0 && !kthread_should_stop();
		     burst_left -= fqs_holdoff) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
		}

		/* stutter_wait() may have boosted our nice level; undo that. */
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
// Used by writers to randomly choose from the available grace-period primitives. staticint synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { }; staticint nsynctypes;
/* * Do the specified rcu_torture_writer() synchronous grace period, * while also testing out the polled APIs. Note well that the single-CPU * grace-period optimizations must be accounted for.
*/ staticvoid do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{ unsignedlong cookie; struct rcu_gp_oldstate cookie_full; bool dopoll; bool dopoll_full; unsignedlong r = torture_random(trsp);
/* * RCU torture writer kthread. Repeatedly substitutes a new structure * for that pointed to by rcu_torture_current, freeing the old structure * after a series of grace periods (the "pipeline").
*/ staticint
rcu_torture_writer(void *arg)
{ bool boot_ended; bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); unsignedlong cookie; struct rcu_gp_oldstate cookie_full; int expediting = 0; unsignedlong gp_snap; unsignedlong gp_snap1; struct rcu_gp_oldstate gp_snap_full; struct rcu_gp_oldstate gp_snap1_full; int i; int idx; int oldnice = task_nice(current); struct rcu_gp_oldstate *rgo = NULL; int rgo_size = 0; struct rcu_torture *rp; struct rcu_torture *old_rp; static DEFINE_TORTURE_RANDOM(rand); unsignedlong stallsdone = jiffies; bool stutter_waited; unsignedlong *ulo = NULL; int ulo_size = 0;
// If a new stall test is added, this must be adjusted. if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
HZ * (stall_cpu_repeat + 1);
VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); if (!can_expedite)
pr_alert("%s" TORTURE_FLAG " GP expediting controlled from boot/sysfs for %s.\n",
torture_type, cur_ops->name); if (WARN_ONCE(nsynctypes == 0, "%s: No update-side primitives.\n", __func__)) { /* * No updates primitives, so don't try updating. * The resulting test won't be testing much, hence the * above WARN_ONCE().
*/
rcu_torture_writer_state = RTWS_STOPPING;
torture_kthread_stopping("rcu_torture_writer"); return 0;
} if (cur_ops->poll_active > 0) {
ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL); if (!WARN_ON(!ulo))
ulo_size = cur_ops->poll_active;
} if (cur_ops->poll_active_full > 0) {
rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL); if (!WARN_ON(!rgo))
rgo_size = cur_ops->poll_active_full;
}
do {
rcu_torture_writer_state = RTWS_FIXED_DELAY;
torture_hrtimeout_us(500, 1000, &rand);
rp = rcu_torture_alloc(); if (rp == NULL) continue;
rp->rtort_pipe_count = 0;
ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
rcu_torture_writer_state = RTWS_DELAY;
udelay(torture_random(&rand) & 0x3ff);
rcu_torture_writer_state = RTWS_REPLACE;
old_rp = rcu_dereference_check(rcu_torture_current,
current == writer_task);
rp->rtort_mbtest = 1;
rcu_assign_pointer(rcu_torture_current, rp);
smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ if (old_rp) {
i = old_rp->rtort_pipe_count; if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
WRITE_ONCE(old_rp->rtort_pipe_count,
old_rp->rtort_pipe_count + 1);
ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);
// Make sure readers block polled grace periods. if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
idx = cur_ops->readlock();
cookie = cur_ops->get_gp_state();
WARN_ONCE(cur_ops->poll_gp_state(cookie), "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
__func__,
rcu_torture_writer_state_getname(),
rcu_torture_writer_state,
cookie, cur_ops->get_gp_state()); if (cur_ops->get_comp_state) {
cookie = cur_ops->get_comp_state();
WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
}
cur_ops->readunlock(idx);
} if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
idx = cur_ops->readlock();
cur_ops->get_gp_state_full(&cookie_full);
WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
__func__,
rcu_torture_writer_state_getname(),
rcu_torture_writer_state,
cpumask_pr_args(cpu_online_mask)); if (cur_ops->get_comp_state_full) {
cur_ops->get_comp_state_full(&cookie_full);
WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
}
cur_ops->readunlock(idx);
} switch (synctype[torture_random(&rand) % nsynctypes]) { case RTWS_DEF_FREE:
rcu_torture_writer_state = RTWS_DEF_FREE;
cur_ops->deferred_free(old_rp); break; case RTWS_EXP_SYNC:
rcu_torture_writer_state = RTWS_EXP_SYNC;
do_rtws_sync(&rand, cur_ops->exp_sync);
rcu_torture_pipe_update(old_rp); break; case RTWS_COND_GET:
rcu_torture_writer_state = RTWS_COND_GET;
gp_snap = cur_ops->get_gp_state();
torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC;
cur_ops->cond_sync(gp_snap);
rcu_torture_pipe_update(old_rp); break; case RTWS_COND_GET_EXP:
rcu_torture_writer_state = RTWS_COND_GET_EXP;
gp_snap = cur_ops->get_gp_state_exp();
torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
cur_ops->cond_sync_exp(gp_snap);
rcu_torture_pipe_update(old_rp); break; case RTWS_COND_GET_FULL:
rcu_torture_writer_state = RTWS_COND_GET_FULL;
cur_ops->get_gp_state_full(&gp_snap_full);
torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
cur_ops->cond_sync_full(&gp_snap_full);
rcu_torture_pipe_update(old_rp); break; case RTWS_COND_GET_EXP_FULL:
rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
cur_ops->get_gp_state_full(&gp_snap_full);
torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
cur_ops->cond_sync_exp_full(&gp_snap_full);
rcu_torture_pipe_update(old_rp); break; case RTWS_POLL_GET:
rcu_torture_writer_state = RTWS_POLL_GET; for (i = 0; i < ulo_size; i++)
ulo[i] = cur_ops->get_comp_state();
gp_snap = cur_ops->start_gp_poll();
rcu_torture_writer_state = RTWS_POLL_WAIT; while (!cur_ops->poll_gp_state(gp_snap)) {
gp_snap1 = cur_ops->get_gp_state(); for (i = 0; i < ulo_size; i++) if (cur_ops->poll_gp_state(ulo[i]) ||
cur_ops->same_gp_state(ulo[i], gp_snap1)) {
ulo[i] = gp_snap1; break;
}
WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
1000, &rand);
}
rcu_torture_pipe_update(old_rp); break; case RTWS_POLL_GET_FULL:
rcu_torture_writer_state = RTWS_POLL_GET_FULL; for (i = 0; i < rgo_size; i++)
cur_ops->get_comp_state_full(&rgo[i]);
cur_ops->start_gp_poll_full(&gp_snap_full);
rcu_torture_writer_state = RTWS_POLL_WAIT_FULL; while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
cur_ops->get_gp_state_full(&gp_snap1_full); for (i = 0; i < rgo_size; i++) if (cur_ops->poll_gp_state_full(&rgo[i]) ||
cur_ops->same_gp_state_full(&rgo[i],
&gp_snap1_full)) {
rgo[i] = gp_snap1_full; break;
}
WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
1000, &rand);
}
rcu_torture_pipe_update(old_rp); break; case RTWS_POLL_GET_EXP:
rcu_torture_writer_state = RTWS_POLL_GET_EXP;
gp_snap = cur_ops->start_gp_poll_exp();
rcu_torture_writer_state = RTWS_POLL_WAIT_EXP; while (!cur_ops->poll_gp_state_exp(gp_snap))
torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
1000, &rand);
rcu_torture_pipe_update(old_rp); break; case RTWS_POLL_GET_EXP_FULL:
rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
cur_ops->start_gp_poll_exp_full(&gp_snap_full);
rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL; while (!cur_ops->poll_gp_state_full(&gp_snap_full))
torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
1000, &rand);
rcu_torture_pipe_update(old_rp); break; case RTWS_SYNC:
rcu_torture_writer_state = RTWS_SYNC;
do_rtws_sync(&rand, cur_ops->sync);
rcu_torture_pipe_update(old_rp); break; default:
WARN_ON_ONCE(1); break;
}
}
WRITE_ONCE(rcu_torture_current_version,
rcu_torture_current_version + 1); /* Cycle through nesting levels of rcu_expedite_gp() calls. */ if (can_expedite &&
!(torture_random(&rand) & 0xff & (!!expediting - 1))) {
WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); if (expediting >= 0)
rcu_expedite_gp(); else
rcu_unexpedite_gp(); if (++expediting > 3)
expediting = -expediting;
} elseif (!can_expedite) { /* Disabled during boot, recheck. */
can_expedite = !rcu_gp_is_expedited() &&
!rcu_gp_is_normal();
}
rcu_torture_writer_state = RTWS_STUTTER;
boot_ended = rcu_inkernel_boot_has_ended();
stutter_waited = stutter_wait("rcu_torture_writer"); if (stutter_waited &&
!atomic_read(&rcu_fwd_cb_nodelay) &&
!cur_ops->slow_gps &&
!torture_must_stop() &&
boot_ended &&
time_after(jiffies, stallsdone)) for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) if (list_empty(&rcu_tortures[i].rtort_free) &&
rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
tracing_off(); if (cur_ops->gp_kthread_dbg)
cur_ops->gp_kthread_dbg();
WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
rcu_ftrace_dump(DUMP_ALL); break;
} if (stutter_waited)
sched_set_normal(current, oldnice);
} while (!torture_must_stop());
rcu_torture_current = NULL; // Let stats task know that we are done. /* Reset expediting back to unexpedited. */ if (expediting > 0)
expediting = -expediting; while (can_expedite && expediting++ < 0)
rcu_unexpedite_gp();
WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); if (!can_expedite)
pr_alert("%s" TORTURE_FLAG " Dynamic grace-period expediting was disabled.\n",
torture_type);
kfree(ulo);
kfree(rgo);
rcu_torture_writer_state = RTWS_STOPPING;
torture_kthread_stopping("rcu_torture_writer"); return 0;
}
/* * RCU torture fake writer kthread. Repeatedly calls sync, with a random * delay between calls.
*/ staticint
rcu_torture_fakewriter(void *arg)
{ unsignedlong gp_snap; struct rcu_gp_oldstate gp_snap_full;
DEFINE_TORTURE_RANDOM(rand);
if (WARN_ONCE(nsynctypes == 0, "%s: No update-side primitives.\n", __func__)) { /* * No updates primitives, so don't try updating. * The resulting test won't be testing much, hence the * above WARN_ONCE().
*/
torture_kthread_stopping("rcu_torture_fakewriter"); return 0;
}
do {
torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); if (cur_ops->cb_barrier != NULL &&
torture_random(&rand) % (nrealfakewriters * 8) == 0) {
cur_ops->cb_barrier();
} else { switch (synctype[torture_random(&rand) % nsynctypes]) { case RTWS_DEF_FREE: break; case RTWS_EXP_SYNC:
cur_ops->exp_sync(); break; case RTWS_COND_GET:
gp_snap = cur_ops->get_gp_state();
torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
cur_ops->cond_sync(gp_snap); break; case RTWS_COND_GET_EXP:
gp_snap = cur_ops->get_gp_state_exp();
torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
cur_ops->cond_sync_exp(gp_snap); break; case RTWS_COND_GET_FULL:
cur_ops->get_gp_state_full(&gp_snap_full);
torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
cur_ops->cond_sync_full(&gp_snap_full); break; case RTWS_COND_GET_EXP_FULL:
cur_ops->get_gp_state_full(&gp_snap_full);
torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
cur_ops->cond_sync_exp_full(&gp_snap_full); break; case RTWS_POLL_GET: if (cur_ops->start_poll_irqsoff)
local_irq_disable();
gp_snap = cur_ops->start_gp_poll(); if (cur_ops->start_poll_irqsoff)
local_irq_enable(); while (!cur_ops->poll_gp_state(gp_snap)) {
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
} break; case RTWS_POLL_GET_FULL: if (cur_ops->start_poll_irqsoff)
local_irq_disable();
cur_ops->start_gp_poll_full(&gp_snap_full); if (cur_ops->start_poll_irqsoff)
local_irq_enable(); while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
} break; case RTWS_POLL_GET_EXP:
gp_snap = cur_ops->start_gp_poll_exp(); while (!cur_ops->poll_gp_state_exp(gp_snap)) {
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
} break; case RTWS_POLL_GET_EXP_FULL:
cur_ops->start_gp_poll_exp_full(&gp_snap_full); while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
} break; case RTWS_SYNC:
cur_ops->sync(); break; default:
WARN_ON_ONCE(1); break;
}
}
stutter_wait("rcu_torture_fakewriter");
} while (!torture_must_stop());
// Set up and carry out testing of RCU's global memory ordering staticvoid rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, struct torture_random_state *trsp)
{ unsignedlong loops; int noc = torture_num_online_cpus(); int rdrchked; int rdrchker; struct rcu_torture_reader_check *rtrcp; // Me. struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
if (myid < 0) return; // Don't try this from timer handlers.
/*
 * NOTE(review): A block of German website-disclaimer boilerplate ("Die
 * Informationen auf dieser Webseite ...") appeared here.  It is not part of
 * the source file — it is an artifact of extracting this code from a web
 * page — and has been replaced by this note.  The function above
 * (rcu_torture_reader_do_mbchk) is truncated at this point; restore its
 * body from the upstream source.
 */