Quellcodebibliothek Statistik Leitseite products/sources/formale Sprachen/C/Linux/kernel/rcu/   (Open Source Betriebssystem Version 6.17.9©)  Datei vom 24.10.2025 mit Größe 147 kB image not shown  

Quelle  rcutorture.c   Sprache: C

 
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *   Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */


#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
/* Email addresses match the authorship comment at the top of this file. */
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

// Bits for ->extendables field, extendables param, and related definitions.
#define RCUTORTURE_RDR_SHIFT_1 8 // Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_1 (0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2 16 // Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_2 (0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH 0x01 // Extend readers by disabling bh.
#define RCUTORTURE_RDR_IRQ 0x02 //  ... disabling interrupts.
#define RCUTORTURE_RDR_PREEMPT 0x04 //  ... disabling preemption.
#define RCUTORTURE_RDR_RBH 0x08 //  ... rcu_read_lock_bh().
#define RCUTORTURE_RDR_SCHED 0x10 //  ... rcu_read_lock_sched().
#define RCUTORTURE_RDR_RCU_1 0x20 //  ... entering another RCU reader.
#define RCUTORTURE_RDR_RCU_2 0x40 //  ... entering another RCU reader.
#define RCUTORTURE_RDR_UPDOWN 0x80 //  ... up-read from task, down-read from timer.
     // Note: Manual start, automatic end.
#define RCUTORTURE_RDR_NBITS 8 // Number of bits defined above.
#define RCUTORTURE_MAX_EXTEND \
 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
  RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)  // Intentionally omit RCUTORTURE_RDR_UPDOWN.
#define RCUTORTURE_RDR_ALLBITS \
 (RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
  RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
     /* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
       "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false"Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false"Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false"Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
      "Use conditional/async full-stateexpedited GP wait primitives");
torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
     "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
torture_param(int, gp_cond_wi_exp, 128,
     "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false"Use expedited GP wait primitives");
torture_param(bool, gp_normal, false"Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false"Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false"Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false"Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false"Use polling full-state expedited GP wait primitives");
torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
     "Wait interval for normal polled grace periods, us (default 16 jiffies)");
torture_param(int, gp_poll_wi_exp, 128,
     "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false"Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(bool, gpwrap_lag, true"Enable grace-period wrap lag testing");
torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false"Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true"Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static int nrealfakewriters;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *updown_task;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
static struct task_struct *preempt_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
 unsigned long rtc_myloops; // This reader's iteration count.
 int rtc_chkrdr; // Index of the reader being checked.
 unsigned long rtc_chkloops; // Loop count captured for the reader being checked.
 int rtc_ready; // Published via smp_store_release() when this check is ready.
 struct rcu_torture_reader_check *rtc_assigner; // Structure that assigned this check.
} ____cacheline_internodealigned_in_smp; // Align to avoid cross-node false sharing.

// Update-side data structure used to check RCU readers.
struct rcu_torture {
 struct rcu_head rtort_rcu; // RCU callback linkage.
 int rtort_pipe_count; // Grace periods elapsed since removal (see pipe_update_one()).
 struct list_head rtort_free; // Linkage on freelist or removed list.
 int rtort_mbtest; // Memory-barrier-test flag; cleared when element retires.
 struct rcu_torture_reader_check *rtort_chkp; // Reader-check mailbox, if any.
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY 0
#define RTWS_DELAY  1
#define RTWS_REPLACE  2
#define RTWS_DEF_FREE  3
#define RTWS_EXP_SYNC  4
#define RTWS_COND_GET  5
#define RTWS_COND_GET_FULL 6
#define RTWS_COND_GET_EXP 7
#define RTWS_COND_GET_EXP_FULL 8
#define RTWS_COND_SYNC  9
#define RTWS_COND_SYNC_FULL 10
#define RTWS_COND_SYNC_EXP 11
#define RTWS_COND_SYNC_EXP_FULL 12
#define RTWS_POLL_GET  13
#define RTWS_POLL_GET_FULL 14
#define RTWS_POLL_GET_EXP 15
#define RTWS_POLL_GET_EXP_FULL 16
#define RTWS_POLL_WAIT  17
#define RTWS_POLL_WAIT_FULL 18
#define RTWS_POLL_WAIT_EXP 19
#define RTWS_POLL_WAIT_EXP_FULL 20
#define RTWS_SYNC  21
#define RTWS_STUTTER  22
#define RTWS_STOPPING  23
static const char * const rcu_torture_writer_state_names[] = {
 "RTWS_FIXED_DELAY",
 "RTWS_DELAY",
 "RTWS_REPLACE",
 "RTWS_DEF_FREE",
 "RTWS_EXP_SYNC",
 "RTWS_COND_GET",
 "RTWS_COND_GET_FULL",
 "RTWS_COND_GET_EXP",
 "RTWS_COND_GET_EXP_FULL",
 "RTWS_COND_SYNC",
 "RTWS_COND_SYNC_FULL",
 "RTWS_COND_SYNC_EXP",
 "RTWS_COND_SYNC_EXP_FULL",
 "RTWS_POLL_GET",
 "RTWS_POLL_GET_FULL",
 "RTWS_POLL_GET_EXP",
 "RTWS_POLL_GET_EXP_FULL",
 "RTWS_POLL_WAIT",
 "RTWS_POLL_WAIT_FULL",
 "RTWS_POLL_WAIT_EXP",
 "RTWS_POLL_WAIT_EXP_FULL",
 "RTWS_SYNC",
 "RTWS_STUTTER",
 "RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
 int rt_readstate; // RCUTORTURE_RDR_* protection bits for this segment.
 unsigned long rt_delay_jiffies; // Sleep-based delay taken, jiffies (0 if none).
 unsigned long rt_delay_ms; // Busy-wait delay taken, milliseconds (0 if none).
 unsigned long rt_delay_us; // Busy-wait delay taken, microseconds (0 if none).
 bool rt_preempted; // Was this segment preempted?
 int rt_cpu; // CPU at segment start.
 int rt_end_cpu; // CPU at segment end.
 unsigned long long rt_gp_seq; // Grace-period sequence number at segment start.
 unsigned long long rt_gp_seq_end; // Grace-period sequence number at segment end.
 u64 rt_ts; // Timestamp for this segment.
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static int rt_read_preempted;

/* Map the current writer state to its human-readable name, "???" if bogus. */
static const char *rcu_torture_writer_state_getname(void)
{
 unsigned int state = READ_ONCE(rcu_torture_writer_state);

 return state < ARRAY_SIZE(rcu_torture_writer_state_names)
  ? rcu_torture_writer_state_names[state]
  : "???";
}

#ifdef CONFIG_RCU_TRACE
/* Return the local trace clock converted from nanoseconds to microseconds. */
static u64 notrace rcu_trace_clock_local(void)
{
 u64 ts = trace_clock_local();

 (void)do_div(ts, NSEC_PER_USEC); /* ns -> us; remainder discarded. */
 return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
/* No RCU tracing configured: report all timestamps as zero. */
static u64 notrace rcu_trace_clock_local(void)
{
 return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */

static bool shutdown_time_arrived(void)
{
 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime; /* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
     /*  and boost task create/destroy. */
static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
static bool barrier_phase;  /* Test phase. */
static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */

static struct rcu_torture *
rcu_torture_alloc(void)
{
 struct rcu_torture *rtp = NULL;
 struct list_head *first;

 spin_lock_bh(&rcu_torture_lock);
 if (!list_empty(&rcu_torture_freelist)) {
  /* Pop the first free element and convert it back to its container. */
  atomic_inc(&n_rcu_torture_alloc);
  first = rcu_torture_freelist.next;
  list_del_init(first);
  rtp = container_of(first, struct rcu_torture, rtort_free);
 } else {
  atomic_inc(&n_rcu_torture_alloc_fail); /* Pool exhausted. */
 }
 spin_unlock_bh(&rcu_torture_lock);
 return rtp;
}

/*
 * Free an element to the rcu_tortures pool.
 */

static void
rcu_torture_free(struct rcu_torture *p)
{
 atomic_inc(&n_rcu_torture_free); /* Statistics only. */
 spin_lock_bh(&rcu_torture_lock);
 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
 spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */


struct rcu_torture_ops {
 int ttype;
 void (*init)(void);
 void (*cleanup)(void);
 int (*readlock)(void);
 void (*read_delay)(struct torture_random_state *rrsp,
      struct rt_read_seg *rtrsp);
 void (*readunlock)(int idx);
 int (*readlock_held)(void);   // lockdep.
 int (*readlock_nesting)(void); // actual nesting, if available, -1 if not.
 int (*down_read)(void);
 void (*up_read)(int idx);
 unsigned long (*get_gp_seq)(void);
 unsigned long (*gp_diff)(unsigned long newunsigned long old);
 void (*deferred_free)(struct rcu_torture *p);
 void (*sync)(void);
 void (*exp_sync)(void);
 unsigned long (*get_gp_state_exp)(void);
 unsigned long (*start_gp_poll_exp)(void);
 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
 bool (*poll_gp_state_exp)(unsigned long oldstate);
 void (*cond_sync_exp)(unsigned long oldstate);
 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
 unsigned long (*get_comp_state)(void);
 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
 unsigned long (*get_gp_state)(void);
 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
 unsigned long (*start_gp_poll)(void);
 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
 bool (*poll_gp_state)(unsigned long oldstate);
 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
 bool (*poll_need_2gp)(bool poll, bool poll_full);
 void (*cond_sync)(unsigned long oldstate);
 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
 int poll_active;
 int poll_active_full;
 call_rcu_func_t call;
 void (*cb_barrier)(void);
 void (*fqs)(void);
 void (*stats)(void);
 void (*gp_kthread_dbg)(void);
 bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
 int (*stall_dur)(void);
 void (*get_gp_data)(int *flags, unsigned long *gp_seq);
 void (*gp_slow_register)(atomic_t *rgssp);
 void (*gp_slow_unregister)(atomic_t *rgssp);
 bool (*reader_blocked)(void);
 unsigned long long (*gather_gp_seqs)(void);
 void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
 void (*set_gpwrap_lag)(unsigned long lag);
 int (*get_gpwrap_count)(int cpu);
 long cbflood_max;
 int irq_capable;
 int can_boost;
 int extendables;
 int slow_gps;
 int no_pi_lock;
 int debug_objects;
 int start_poll_irqsoff;
 int have_up_down;
 const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */


/* Nonzero if some non-vanilla-RCU read-side critical section is held. */
static int torture_readlock_not_held(void)
{
 if (rcu_read_lock_bh_held())
  return 1;
 if (rcu_read_lock_sched_held())
  return 1;
 return 0;
}

/* ->readlock() for vanilla RCU; the returned index is always zero. */
static int rcu_torture_read_lock(void)
{
 rcu_read_lock();
 return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
 unsigned long started;
 unsigned long completed;
 const unsigned long shortdelay_us = 200;
 unsigned long longdelay_ms = 300;
 unsigned long long ts;

 /*
  * We want a short delay sometimes to make a reader delay the grace
  * period, and we want a long delay occasionally to trigger
  * force_quiescent_state.
  */

 if (!atomic_read(&rcu_fwd_cb_nodelay) &&
     !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
  /* Rare long delay: record GP sequence numbers around it for tracing. */
  started = cur_ops->get_gp_seq();
  ts = rcu_trace_clock_local();
  if ((preempt_count() & HARDIRQ_MASK) || softirq_count())
   longdelay_ms = 5; /* Avoid triggering BH limits. */
  mdelay(longdelay_ms);
  rtrsp->rt_delay_ms = longdelay_ms;
  completed = cur_ops->get_gp_seq();
  do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
       started, completed);
 }
 /* More-frequent short busy-wait delay. */
 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
  udelay(shortdelay_us);
  rtrsp->rt_delay_us = shortdelay_us;
 }
 /* Occasionally yield, but only where preemption is already legal. */
 if (!preempt_count() &&
     !(torture_random(rrsp) % (nrealreaders * 500)))
  torture_preempt_schedule();  /* QS only if preemptible. */
}

/* ->readunlock() for vanilla RCU; idx is unused (always zero). */
static void rcu_torture_read_unlock(int idx)
{
 rcu_read_unlock();
}

/* Report reader nesting depth via whichever counter the config provides, -1 if none. */
static int rcu_torture_readlock_nesting(void)
{
 int depth = -1;

 if (IS_ENABLED(CONFIG_PREEMPT_RCU))
  depth = rcu_preempt_depth();
 else if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
  depth = preempt_count() & PREEMPT_MASK;
 return depth;
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */

/*
 * Advance one element through the pipe.  Returns true when the element
 * has aged through the full pipe and may be freed.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
 int i;
 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

 if (rtrcp) {
  /* Detach the mailbox and publish readiness to the checking reader. */
  WRITE_ONCE(rp->rtort_chkp, NULL);
  smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
 }
 i = rp->rtort_pipe_count;
 if (i > RCU_TORTURE_PIPE_LEN)
  i = RCU_TORTURE_PIPE_LEN; /* Clamp for the histogram index below. */
 atomic_inc(&rcu_torture_wcount[i]);
 WRITE_ONCE(rp->rtort_pipe_count, i + 1);
 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
 if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
  rp->rtort_mbtest = 0; /* Retire from memory-barrier testing. */
  return true;
 }
 return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */

/*
 * Advance all elements on the removed list through the pipe, optionally
 * adding old_rp first, and free any that have fully aged.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
 struct rcu_torture *rp;
 struct rcu_torture *rp1;

 if (old_rp)
  list_add(&old_rp->rtort_free, &rcu_torture_removed);
 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
  if (rcu_torture_pipe_update_one(rp)) {
   list_del(&rp->rtort_free);
   rcu_torture_free(rp);
  }
 }
}

/*
 * RCU callback: age the element one pipe stage, freeing it when done
 * or re-queueing it via the flavor's deferred-free method otherwise.
 */
static void
rcu_torture_cb(struct rcu_head *p)
{
 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

 if (torture_must_stop_irq()) {
  /* Test is ending, just drop callbacks on the floor. */
  /* The next initialization will pick up the pieces. */
  return;
 }
 if (rcu_torture_pipe_update_one(rp))
  rcu_torture_free(rp);
 else
  cur_ops->deferred_free(rp);
}

/* ->get_gp_seq() stub for flavors lacking grace-period sequence numbers. */
static unsigned long rcu_no_completed(void)
{
 return 0;
}

/* ->deferred_free() for vanilla RCU: queue the aging callback. */
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
 call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

/* Common ->init(): reset the removed-but-not-yet-freed list. */
static void rcu_sync_torture_init(void)
{
 INIT_LIST_HEAD(&rcu_torture_removed);
}

/* ->poll_need_2gp() for vanilla RCU: only non-full polling needs it. */
static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
 return poll;
}

static struct rcu_torture_ops rcu_ops = {
 .ttype   = RCU_FLAVOR,
 .init   = rcu_sync_torture_init,
 .readlock  = rcu_torture_read_lock,
 .read_delay  = rcu_read_delay,
 .readunlock  = rcu_torture_read_unlock,
 .readlock_held  = torture_readlock_not_held,
 .readlock_nesting = rcu_torture_readlock_nesting,
 .get_gp_seq  = rcu_get_gp_seq,
 .gp_diff  = rcu_seq_diff,
 .deferred_free  = rcu_torture_deferred_free,
 .sync   = synchronize_rcu,
 .exp_sync  = synchronize_rcu_expedited,
 .same_gp_state  = same_state_synchronize_rcu,
 .same_gp_state_full = same_state_synchronize_rcu_full,
 .get_comp_state  = get_completed_synchronize_rcu,
 .get_comp_state_full = get_completed_synchronize_rcu_full,
 .get_gp_state  = get_state_synchronize_rcu,
 .get_gp_state_full = get_state_synchronize_rcu_full,
 .start_gp_poll  = start_poll_synchronize_rcu,
 .start_gp_poll_full = start_poll_synchronize_rcu_full,
 .poll_gp_state  = poll_state_synchronize_rcu,
 .poll_gp_state_full = poll_state_synchronize_rcu_full,
 .poll_need_2gp  = rcu_poll_need_2gp,
 .cond_sync  = cond_synchronize_rcu,
 .cond_sync_full  = cond_synchronize_rcu_full,
 .poll_active  = NUM_ACTIVE_RCU_POLL_OLDSTATE,
 .poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
 .get_gp_state_exp = get_state_synchronize_rcu,
 .start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
 .start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
 .poll_gp_state_exp = poll_state_synchronize_rcu,
 .cond_sync_exp  = cond_synchronize_rcu_expedited,
 .cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
 .call   = call_rcu_hurry,
 .cb_barrier  = rcu_barrier,
 .fqs   = rcu_force_quiescent_state,
 .gp_kthread_dbg  = show_rcu_gp_kthreads,
 .check_boost_failed = rcu_check_boost_fail,
 .stall_dur  = rcu_jiffies_till_stall_check,
 .get_gp_data  = rcutorture_get_gp_data,
 .gp_slow_register = rcu_gp_slow_register,
 .gp_slow_unregister = rcu_gp_slow_unregister,
 .reader_blocked  = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
      ? has_rcu_reader_blocked
      : NULL,
 .gather_gp_seqs  = rcutorture_gather_gp_seqs,
 .format_gp_seqs  = rcutorture_format_gp_seqs,
 .set_gpwrap_lag  = rcu_set_gpwrap_lag,
 .get_gpwrap_count = rcu_get_gpwrap_count,
 .irq_capable  = 1,
 .can_boost  = IS_ENABLED(CONFIG_RCU_BOOST),
 .extendables  = RCUTORTURE_MAX_EXTEND,
 .debug_objects  = 1,
 .start_poll_irqsoff = 1,
 .name   = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */

/* Deliberately broken ->deferred_free(): invokes the callback immediately. */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
 /* This is a deliberate bug for testing purposes only! */
 rcu_torture_cb(&p->rtort_rcu);
}

/* Deliberately broken ->sync()/->exp_sync(): waits for nothing at all. */
static void synchronize_rcu_busted(void)
{
 /* This is a deliberate bug for testing purposes only! */
}

/* Deliberately broken ->call(): invokes the callback without any grace period. */
static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
 /* This is a deliberate bug for testing purposes only! */
 func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
 .ttype  = INVALID_RCU_FLAVOR,
 .init  = rcu_sync_torture_init,
 .readlock = rcu_torture_read_lock,
 .read_delay = rcu_read_delay,  /* just reuse rcu's version. */
 .readunlock = rcu_torture_read_unlock,
 .readlock_held = torture_readlock_not_held,
 .get_gp_seq = rcu_no_completed,
 .deferred_free = rcu_busted_torture_deferred_free,
 .sync  = synchronize_rcu_busted,
 .exp_sync = synchronize_rcu_busted,
 .call  = call_rcu_busted,
 .gather_gp_seqs = rcutorture_gather_gp_seqs,
 .format_gp_seqs = rcutorture_format_gp_seqs,
 .irq_capable = 1,
 .extendables = RCUTORTURE_MAX_EXTEND,
 .name  = "busted"
};

/*
 * Definitions for srcu torture testing.
 */


DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

/* ->get_gp_data() for SRCU: fetch state from the current srcu_struct. */
static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
 srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

/*
 * ->readlock() for SRCU.  Acquires one read lock per requested flavor and
 * packs the per-flavor indexes into the return value: normal in bit 0,
 * NMI-safe in bit 1, fast in bit 3.  srcu_torture_read_unlock() decodes.
 */
static int srcu_torture_read_lock(void)
{
 int idx;
 struct srcu_ctr __percpu *scp;
 int ret = 0;

 WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);

 /* Normal flavor, also the default when no flavor bits are set. */
 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
  idx = srcu_read_lock(srcu_ctlp);
  WARN_ON_ONCE(idx & ~0x1);
  ret += idx;
 }
 if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
  idx = srcu_read_lock_nmisafe(srcu_ctlp);
  WARN_ON_ONCE(idx & ~0x1);
  ret += idx << 1;
 }
 if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
  scp = srcu_read_lock_fast(srcu_ctlp);
  idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
  WARN_ON_ONCE(idx & ~0x1);
  ret += idx << 3;
 }
 return ret;
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
 const long uspertick = 1000000 / HZ;
 const long longdelay = 10;
 long roll;

 /*
  * Occasionally sleep to create a long-running reader; otherwise fall
  * back to the short-delay logic shared with plain RCU.
  */
 roll = torture_random(rrsp) %
  (nrealreaders * 2 * longdelay * uspertick);
 if (roll || !in_task()) {
  rcu_read_delay(rrsp, rtrsp);
 } else {
  schedule_timeout_interruptible(longdelay);
  rtrsp->rt_delay_jiffies = longdelay;
 }
}

/* ->readunlock() for SRCU: decode srcu_torture_read_lock()'s packed indexes. */
static void srcu_torture_read_unlock(int idx)
{
 WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
 if (reader_flavor & SRCU_READ_FLAVOR_FAST)
  srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
 if (reader_flavor & SRCU_READ_FLAVOR_NMI)
  srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
  srcu_read_unlock(srcu_ctlp, idx & 0x1);
}

/* ->readlock_held() for SRCU: lockdep check against the current srcu_struct. */
static int torture_srcu_read_lock_held(void)
{
 return srcu_read_lock_held(srcu_ctlp);
}

/* Does the current flavor selection support up/down readers? */
static bool srcu_torture_have_up_down(void)
{
 int flavors = reader_flavor ? reader_flavor : SRCU_READ_FLAVOR_NORMAL;

 return (cur_ops->have_up_down & flavors) != 0;
}

/*
 * ->down_read() for SRCU.  Exactly one flavor may be selected (or none,
 * which defaults to normal).  Index encoding matches srcu_torture_read_lock():
 * normal in bit 0, fast in bit 3.
 */
static int srcu_torture_down_read(void)
{
 int idx;
 struct srcu_ctr __percpu *scp;

 WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);
 WARN_ON_ONCE(reader_flavor & (reader_flavor - 1)); /* At most one bit set. */

 if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
  idx = srcu_down_read(srcu_ctlp);
  WARN_ON_ONCE(idx & ~0x1);
  return idx;
 }
 if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
  scp = srcu_down_read_fast(srcu_ctlp);
  idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
  WARN_ON_ONCE(idx & ~0x1);
  return idx << 3;
 }
 WARN_ON_ONCE(1); /* Unsupported flavor for up/down readers. */
 return 0;
}

/* ->up_read() for SRCU: release the lock taken by srcu_torture_down_read(). */
static void srcu_torture_up_read(int idx)
{
 WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
 if (reader_flavor & SRCU_READ_FLAVOR_FAST)
  srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
 else if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) ||
   !(reader_flavor & SRCU_READ_FLAVOR_ALL))
  srcu_up_read(srcu_ctlp, idx & 0x1);
 else
  WARN_ON_ONCE(1);
}

/* ->get_gp_seq(): grace-period sequence number from srcu_batches_completed(). */
static unsigned long srcu_torture_completed(void)
{
 return srcu_batches_completed(srcu_ctlp);
}

/* ->deferred_free(): queue the aging callback via call_srcu(). */
static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

/* ->sync(): synchronous SRCU grace-period wait. */
static void srcu_torture_synchronize(void)
{
 synchronize_srcu(srcu_ctlp);
}

/* ->get_gp_state(): snapshot for later polling. */
static unsigned long srcu_torture_get_gp_state(void)
{
 return get_state_synchronize_srcu(srcu_ctlp);
}

/* ->start_gp_poll(): start a grace period and return a polling cookie. */
static unsigned long srcu_torture_start_gp_poll(void)
{
 return start_poll_synchronize_srcu(srcu_ctlp);
}

/* ->poll_gp_state(): has the grace period for oldstate completed? */
static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
 return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

/* ->call(): queue an arbitrary SRCU callback. */
static void srcu_torture_call(struct rcu_head *head,
         rcu_callback_t func)
{
 call_srcu(srcu_ctlp, head, func);
}

/* ->cb_barrier(): wait for all queued SRCU callbacks to be invoked. */
static void srcu_torture_barrier(void)
{
 srcu_barrier(srcu_ctlp);
}

/* ->stats(): print SRCU statistics. */
static void srcu_torture_stats(void)
{
 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

/* ->exp_sync(): expedited synchronous SRCU grace-period wait. */
static void srcu_torture_synchronize_expedited(void)
{
 synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
 .ttype  = SRCU_FLAVOR,
 .init  = rcu_sync_torture_init,
 .readlock = srcu_torture_read_lock,
 .read_delay = srcu_read_delay,
 .readunlock = srcu_torture_read_unlock,
 .down_read = srcu_torture_down_read,
 .up_read = srcu_torture_up_read,
 .readlock_held = torture_srcu_read_lock_held,
 .get_gp_seq = srcu_torture_completed,
 .gp_diff = rcu_seq_diff,
 .deferred_free = srcu_torture_deferred_free,
 .sync  = srcu_torture_synchronize,
 .exp_sync = srcu_torture_synchronize_expedited,
 .same_gp_state = same_state_synchronize_srcu,
 .get_comp_state = get_completed_synchronize_srcu,
 .get_gp_state = srcu_torture_get_gp_state,
 .start_gp_poll = srcu_torture_start_gp_poll,
 .poll_gp_state = srcu_torture_poll_gp_state,
 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
 .call  = srcu_torture_call,
 .cb_barrier = srcu_torture_barrier,
 .stats  = srcu_torture_stats,
 .get_gp_data = srcu_get_gp_data,
 .cbflood_max = 50000,
 .irq_capable = 1,
 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
 .debug_objects = 1,
 .have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
    ? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST,
 .name  = "srcu"
};

/* ->init() for srcud: switch to the dynamically initialized srcu_struct. */
static void srcu_torture_init(void)
{
 rcu_sync_torture_init();
 WARN_ON(init_srcu_struct(&srcu_ctld));
 srcu_ctlp = &srcu_ctld;
}

/* ->cleanup() for srcud: tear down the dynamic srcu_struct. */
static void srcu_torture_cleanup(void)
{
 cleanup_srcu_struct(&srcu_ctld);
 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
 .ttype  = SRCU_FLAVOR,
 .init  = srcu_torture_init,
 .cleanup = srcu_torture_cleanup,
 .readlock = srcu_torture_read_lock,
 .read_delay = srcu_read_delay,
 .readunlock = srcu_torture_read_unlock,
 .readlock_held = torture_srcu_read_lock_held,
 .down_read = srcu_torture_down_read,
 .up_read = srcu_torture_up_read,
 .get_gp_seq = srcu_torture_completed,
 .gp_diff = rcu_seq_diff,
 .deferred_free = srcu_torture_deferred_free,
 .sync  = srcu_torture_synchronize,
 .exp_sync = srcu_torture_synchronize_expedited,
 .same_gp_state = same_state_synchronize_srcu,
 .get_comp_state = get_completed_synchronize_srcu,
 .get_gp_state = srcu_torture_get_gp_state,
 .start_gp_poll = srcu_torture_start_gp_poll,
 .poll_gp_state = srcu_torture_poll_gp_state,
 .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
 .call  = srcu_torture_call,
 .cb_barrier = srcu_torture_barrier,
 .stats  = srcu_torture_stats,
 .get_gp_data = srcu_get_gp_data,
 .cbflood_max = 50000,
 .irq_capable = 1,
 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
 .debug_objects = 1,
 .have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
    ? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST,
 .name  = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
 .ttype  = SRCU_FLAVOR,
 .init  = srcu_torture_init,
 .cleanup = srcu_torture_cleanup,
 .readlock = srcu_torture_read_lock,
 .read_delay = rcu_read_delay,
 .readunlock = srcu_torture_read_unlock,
 .readlock_held = torture_srcu_read_lock_held,
 .get_gp_seq = srcu_torture_completed,
 .deferred_free = srcu_torture_deferred_free,
 .sync  = srcu_torture_synchronize,
 .exp_sync = srcu_torture_synchronize_expedited,
 .call  = srcu_torture_call,
 .cb_barrier = srcu_torture_barrier,
 .stats  = srcu_torture_stats,
 .irq_capable = 1,
 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
 .extendables = RCUTORTURE_MAX_EXTEND,
 .name  = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not work well with CPU hotplug nor
 * with rcutorture's shuffling.
 */


/*
 * Trivial grace-period wait: migrate to each online CPU in turn, so that
 * every CPU has context-switched by the time we return.
 */
static void synchronize_rcu_trivial(void)
{
 int cpu;

 for_each_online_cpu(cpu) {
  torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
  WARN_ON_ONCE(raw_smp_processor_id() != cpu); /* Verify the migration. */
 }
}

/*
 * ->init() for trivial RCU.  CPU hotplug and task shuffling break the
 * trivial implementation, so force the corresponding intervals to zero
 * (with a one-time warning).  Removed a leftover commented-out condition.
 */
static void rcu_sync_torture_init_trivial(void)
{
 rcu_sync_torture_init();
 if (WARN_ONCE(onoff_interval || shuffle_interval, "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero", __func__, onoff_interval, shuffle_interval)) {
  onoff_interval = 0;
  shuffle_interval = 0;
 }
}

/* ->readlock() for trivial RCU: a preempt-disabled region; index always zero. */
static int rcu_torture_read_lock_trivial(void)
{
 preempt_disable();
 return 0;
}

/* ->readunlock() for trivial RCU; idx is unused. */
static void rcu_torture_read_unlock_trivial(int idx)
{
 preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
 .ttype  = RCU_TRIVIAL_FLAVOR,
 .init  = rcu_sync_torture_init_trivial,
 .readlock = rcu_torture_read_lock_trivial,
 .read_delay = rcu_read_delay,  /* just reuse rcu's version. */
 .readunlock = rcu_torture_read_unlock_trivial,
 .readlock_held = torture_readlock_not_held,
 .get_gp_seq = rcu_no_completed,
 .sync  = synchronize_rcu_trivial,
 .exp_sync = synchronize_rcu_trivial,
 .irq_capable = 1,
 .name  = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */


// RCU-tasks readers are delimited by voluntary context switches, so the
// read-side "lock" is a no-op.  Returned index is unused.
static int tasks_torture_read_lock(void)
{
 return 0;
}

// RCU-tasks read-side exit is likewise a no-op; @idx is ignored.
static void tasks_torture_read_unlock(int idx)
{
}

// Queue @p for freeing by rcu_torture_cb() after an RCU-tasks grace period.
static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

// Exercise synchronize_rcu_mult() by concurrently waiting for both an
// RCU-tasks and a plain RCU grace period.
static void synchronize_rcu_mult_test(void)
{
 synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

// Operations vector for RCU-tasks torture testing.
static struct rcu_torture_ops tasks_ops = {
 .ttype  = RCU_TASKS_FLAVOR,
 .init  = rcu_sync_torture_init,
 .readlock = tasks_torture_read_lock,
 .read_delay = rcu_read_delay,  /* just reuse rcu's version. */
 .readunlock = tasks_torture_read_unlock,
 .get_gp_seq = rcu_no_completed,
 .deferred_free = rcu_tasks_torture_deferred_free,
 .sync  = synchronize_rcu_tasks,
 .exp_sync = synchronize_rcu_mult_test,
 .call  = call_rcu_tasks,
 .cb_barrier = rcu_barrier_tasks,
 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
 .get_gp_data = rcu_tasks_get_gp_data,
 .irq_capable = 1,
 .slow_gps = 1, /* RCU-tasks grace periods are expected to be slow. */
 .name  = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */


// Operations vector for rude RCU-tasks torture testing.  Read-side
// markers reuse the trivial flavor's preempt_disable()/preempt_enable().
static struct rcu_torture_ops tasks_rude_ops = {
 .ttype  = RCU_TASKS_RUDE_FLAVOR,
 .init  = rcu_sync_torture_init,
 .readlock = rcu_torture_read_lock_trivial,
 .read_delay = rcu_read_delay,  /* just reuse rcu's version. */
 .readunlock = rcu_torture_read_unlock_trivial,
 .get_gp_seq = rcu_no_completed,
 .sync  = synchronize_rcu_tasks_rude,
 .exp_sync = synchronize_rcu_tasks_rude,
 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
 .get_gp_data = rcu_tasks_rude_get_gp_data,
 .cbflood_max = 50000, /* Cap callback-flood testing. */
 .irq_capable = 1,
 .name  = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */


// Enter an RCU-tasks-trace read-side critical section.  Returned index
// is unused.
static int tasks_tracing_torture_read_lock(void)
{
 rcu_read_lock_trace();
 return 0;
}

// Exit an RCU-tasks-trace read-side critical section; @idx is ignored.
static void tasks_tracing_torture_read_unlock(int idx)
{
 rcu_read_unlock_trace();
}

// Queue @p for freeing by rcu_torture_cb() after an RCU-tasks-trace
// grace period.
static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

// Operations vector for tracing RCU-tasks torture testing.
static struct rcu_torture_ops tasks_tracing_ops = {
 .ttype  = RCU_TASKS_TRACING_FLAVOR,
 .init  = rcu_sync_torture_init,
 .readlock = tasks_tracing_torture_read_lock,
 .read_delay = srcu_read_delay,  /* just reuse srcu's version. */
 .readunlock = tasks_tracing_torture_read_unlock,
 .readlock_held = rcu_read_lock_trace_held,
 .get_gp_seq = rcu_no_completed,
 .deferred_free = rcu_tasks_tracing_torture_deferred_free,
 .sync  = synchronize_rcu_tasks_trace,
 .exp_sync = synchronize_rcu_tasks_trace,
 .call  = call_rcu_tasks_trace,
 .cb_barrier = rcu_barrier_tasks_trace,
 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
 .get_gp_data    = rcu_tasks_trace_get_gp_data,
 .cbflood_max = 50000, /* Cap callback-flood testing. */
 .irq_capable = 1,
 .slow_gps = 1, /* Tasks-trace grace periods are expected to be slow. */
 .name  = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long newunsigned long old)
{
 if (!cur_ops->gp_diff)
  return new - old;
 return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */


// Saved sched_rt_runtime value; -1 means RT throttling is untouched.
static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
 /*
  * Disable RT throttling so that rcutorture's boost threads don't get
  * throttled. Only possible if rcutorture is built-in otherwise the
  * user should manually do this by setting the sched_rt_period_us and
  * sched_rt_runtime sysctls.
  */

 // Bail if not built in, or if throttling was already disabled.
 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
  return;

 old_rt_runtime = sysctl_sched_rt_runtime;
 sysctl_sched_rt_runtime = -1; // -1 disables RT throttling.
}

// Restore the RT-throttling setting saved by
// rcu_torture_disable_rt_throttle(), if any.
static void rcu_torture_enable_rt_throttle(void)
{
 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
  return;

 sysctl_sched_rt_runtime = old_rt_runtime;
 old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
 int cpu;
 static int dbg_done;
 unsigned long end = jiffies;
 bool gp_done;
 unsigned long j;
 static unsigned long last_persist;
 unsigned long lp;
 unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

 if (end - *start > mininterval) {
  // Recheck after checking time to avoid false positives.
  smp_mb(); // Time check before grace-period check.
  if (cur_ops->poll_gp_state(gp_state))
   return false// passed, though perhaps just barely
  if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
   // At most one persisted message per boost test.
   j = jiffies;
   lp = READ_ONCE(last_persist);
   if (time_after(j, lp + mininterval) &&
       cmpxchg(&last_persist, lp, j) == lp) {
    if (cpu < 0)
     pr_info("Boost inversion persisted: QS from all CPUs\n");
    else
     pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
   }
   return false// passed on a technicality
  }
  VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
  n_rcu_torture_boost_failure++;
  if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
   pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
    current->rt_priority, gp_state, end - *start);
   cur_ops->gp_kthread_dbg();
   // Recheck after print to flag grace period ending during splat.
   gp_done = cur_ops->poll_gp_state(gp_state);
   pr_info("Boost inversion: GP %lu %s.\n", gp_state,
    gp_done ? "ended already" : "still pending");

  }

  return true// failed
 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
  *start = jiffies;
 }

 return false// passed
}

static int rcu_torture_boost(void *arg)
{
 unsigned long endtime;
 unsigned long gp_state;
 unsigned long gp_state_time;
 unsigned long oldstarttime;
 unsigned long booststarttime = get_torture_init_jiffies() + test_boost_holdoff * HZ;

 if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) {
  VERBOSE_TOROUT_STRING("rcu_torture_boost started");
 } else {
  VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period");
  while (time_before(jiffies, booststarttime)) {
   schedule_timeout_idle(HZ);
   if (kthread_should_stop())
    goto cleanup;
  }
  VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period");
 }

 /* Set real-time priority. */
 sched_set_fifo_low(current);

 /* Each pass through the following loop does one boost-test cycle. */
 do {
  bool failed = false// Test failed already in this test interval
  bool gp_initiated = false;

  if (kthread_should_stop())
   goto checkwait;

  /* Wait for the next test interval. */
  oldstarttime = READ_ONCE(boost_starttime);
  while (time_before(jiffies, oldstarttime)) {
   schedule_timeout_interruptible(oldstarttime - jiffies);
   if (stutter_wait("rcu_torture_boost"))
    sched_set_fifo_low(current);
   if (torture_must_stop())
    goto checkwait;
  }

  // Do one boost-test interval.
  endtime = oldstarttime + test_boost_duration * HZ;
  while (time_before(jiffies, endtime)) {
   // Has current GP gone too long?
   if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
    failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
   // If we don't have a grace period in flight, start one.
   if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
    gp_state = cur_ops->start_gp_poll();
    gp_initiated = true;
    gp_state_time = jiffies;
   }
   if (stutter_wait("rcu_torture_boost")) {
    sched_set_fifo_low(current);
    // If the grace period already ended,
    // we don't know when that happened, so
    // start over.
    if (cur_ops->poll_gp_state(gp_state))
     gp_initiated = false;
   }
   if (torture_must_stop())
    goto checkwait;
  }

  // In case the grace period extended beyond the end of the loop.
  if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
   rcu_torture_boost_failed(gp_state, &gp_state_time);

  /*
 * Set the start time of the next test interval.
 * Yes, this is vulnerable to long delays, but such
 * delays simply cause a false negative for the next
 * interval.  Besides, we are running at RT priority,
 * so delays should be relatively rare.
 */

  while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
   if (mutex_trylock(&boost_mutex)) {
    if (oldstarttime == boost_starttime) {
     WRITE_ONCE(boost_starttime,
         jiffies + test_boost_interval * HZ);
     n_rcu_torture_boosts++;
    }
    mutex_unlock(&boost_mutex);
    break;
   }
   schedule_timeout_uninterruptible(HZ / 20);
  }

  /* Go do the stutter. */
checkwait: if (stutter_wait("rcu_torture_boost"))
   sched_set_fifo_low(current);
 } while (!torture_must_stop());

cleanup:
 /* Clean up and exit. */
 while (!kthread_should_stop()) {
  torture_shutdown_absorb("rcu_torture_boost");
  schedule_timeout_uninterruptible(HZ / 20);
 }
 torture_kthread_stopping("rcu_torture_boost");
 return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */

static int
rcu_torture_fqs(void *arg)
{
 unsigned long fqs_resume_time;
 int fqs_burst_remaining;
 int oldnice = task_nice(current);

 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
 do {
  // Sleep fqs_stutter seconds between bursts.
  fqs_resume_time = jiffies + fqs_stutter * HZ;
  while (time_before(jiffies, fqs_resume_time) &&
         !kthread_should_stop()) {
   schedule_timeout_interruptible(HZ / 20);
  }
  // Do one burst of ->fqs() calls, fqs_holdoff microseconds apart,
  // for a total of roughly fqs_duration microseconds.
  fqs_burst_remaining = fqs_duration;
  while (fqs_burst_remaining > 0 &&
         !kthread_should_stop()) {
   cur_ops->fqs();
   udelay(fqs_holdoff);
   fqs_burst_remaining -= fqs_holdoff;
  }
  // stutter_wait() may have boosted us; restore our nice level.
  if (stutter_wait("rcu_torture_fqs"))
   sched_set_normal(current, oldnice);
 } while (!torture_must_stop());
 torture_kthread_stopping("rcu_torture_fqs");
 return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes; // Number of valid entries in synctype[].

/*
 * Determine which grace-period primitives are available.
 */

static void rcu_torture_write_types(void)
{
 bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
 bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
 bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
 bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

 /* Initialize synctype[] array.  If none set, take default. */
 if (!gp_cond1 &&
     !gp_cond_exp1 &&
     !gp_cond_full1 &&
     !gp_cond_exp_full1 &&
     !gp_exp1 &&
     !gp_poll_exp1 &&
     !gp_poll_exp_full1 &&
     !gp_normal1 &&
     !gp_poll1 &&
     !gp_poll_full1 &&
     !gp_sync1) {
  gp_cond1 = true;
  gp_cond_exp1 = true;
  gp_cond_full1 = true;
  gp_cond_exp_full1 = true;
  gp_exp1 = true;
  gp_poll_exp1 = true;
  gp_poll_exp_full1 = true;
  gp_normal1 = true;
  gp_poll1 = true;
  gp_poll_full1 = true;
  gp_sync1 = true;
 }
 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
  synctype[nsynctypes++] = RTWS_COND_GET;
  pr_info("%s: Testing conditional GPs.\n", __func__);
 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
  pr_alert("%s: gp_cond without primitives.\n", __func__);
 }
 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
  synctype[nsynctypes++] = RTWS_COND_GET_EXP;
  pr_info("%s: Testing conditional expedited GPs.\n", __func__);
 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
  pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
 }
 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
  synctype[nsynctypes++] = RTWS_COND_GET_FULL;
  pr_info("%s: Testing conditional full-state GPs.\n", __func__);
 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
  pr_alert("%s: gp_cond_full without primitives.\n", __func__);
 }
 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
  synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
  pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
 } else if (gp_cond_exp_full &&
     (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
  pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
 }
 if (gp_exp1 && cur_ops->exp_sync) {
  synctype[nsynctypes++] = RTWS_EXP_SYNC;
  pr_info("%s: Testing expedited GPs.\n", __func__);
 } else if (gp_exp && !cur_ops->exp_sync) {
  pr_alert("%s: gp_exp without primitives.\n", __func__);
 }
 if (gp_normal1 && cur_ops->deferred_free) {
  synctype[nsynctypes++] = RTWS_DEF_FREE;
  pr_info("%s: Testing asynchronous GPs.\n", __func__);
 } else if (gp_normal && !cur_ops->deferred_free) {
  pr_alert("%s: gp_normal without primitives.\n", __func__);
 }
 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
     cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
  synctype[nsynctypes++] = RTWS_POLL_GET;
  pr_info("%s: Testing polling GPs.\n", __func__);
 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
  pr_alert("%s: gp_poll without primitives.\n", __func__);
 }
 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
     && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
  synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
  pr_info("%s: Testing polling full-state GPs.\n", __func__);
 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
  pr_alert("%s: gp_poll_full without primitives.\n", __func__);
 }
 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
  synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
  pr_info("%s: Testing polling expedited GPs.\n", __func__);
 } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
  pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
 }
 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
  synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
  pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
 } else if (gp_poll_exp_full &&
     (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
  pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
 }
 if (gp_sync1 && cur_ops->sync) {
  synctype[nsynctypes++] = RTWS_SYNC;
  pr_info("%s: Testing normal GPs.\n", __func__);
 } else if (gp_sync && !cur_ops->sync) {
  pr_alert("%s: gp_sync without primitives.\n", __func__);
 }
 pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
 pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n"__func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp);
}

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */

static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
 unsigned long cookie;
 struct rcu_gp_oldstate cookie_full;
 bool dopoll;
 bool dopoll_full;
 unsigned long r = torture_random(trsp);

 // Randomly decide whether to also exercise the polled and/or
 // full-state polled APIs around this synchronous grace period.
 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
 // Exclude CPU hotplug so that single-CPU grace-period optimizations
 // cannot kick in partway through the check.
 if (dopoll || dopoll_full)
  cpus_read_lock();
 if (dopoll)
  cookie = cur_ops->get_gp_state();
 if (dopoll_full)
  cur_ops->get_gp_state_full(&cookie_full);
 // Some flavors need two grace periods before the cookies expire.
 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
  sync();
 sync();
 // After sync() returns, both cookies must indicate an expired GP.
 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
    "%s: Cookie check 3 failed %pS() online %*pbl.",
    __func__, sync, cpumask_pr_args(cpu_online_mask));
 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
    "%s: Cookie check 4 failed %pS() online %*pbl",
    __func__, sync, cpumask_pr_args(cpu_online_mask));
 if (dopoll || dopoll_full)
  cpus_read_unlock();
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */

static int
rcu_torture_writer(void *arg)
{
 bool boot_ended;
 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
 unsigned long cookie;
 struct rcu_gp_oldstate cookie_full;
 int expediting = 0;
 unsigned long gp_snap;
 unsigned long gp_snap1;
 struct rcu_gp_oldstate gp_snap_full;
 struct rcu_gp_oldstate gp_snap1_full;
 int i;
 int idx;
 int oldnice = task_nice(current);
 struct rcu_gp_oldstate *rgo = NULL;
 int rgo_size = 0;
 struct rcu_torture *rp;
 struct rcu_torture *old_rp;
 static DEFINE_TORTURE_RANDOM(rand);
 unsigned long stallsdone = jiffies;
 bool stutter_waited;
 unsigned long *ulo = NULL;
 int ulo_size = 0;

 // If a new stall test is added, this must be adjusted.
 if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
  stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
         HZ * (stall_cpu_repeat + 1);
 VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
 if (!can_expedite)
  pr_alert("%s" TORTURE_FLAG
    " GP expediting controlled from boot/sysfs for %s.\n",
    torture_type, cur_ops->name);
 if (WARN_ONCE(nsynctypes == 0,
        "%s: No update-side primitives.\n", __func__)) {
  /*
   * No updates primitives, so don't try updating.
   * The resulting test won't be testing much, hence the
   * above WARN_ONCE().
   */

  rcu_torture_writer_state = RTWS_STOPPING;
  torture_kthread_stopping("rcu_torture_writer");
  return 0;
 }
 // Allocate arrays for tracking the flavor's maximum number of
 // simultaneously active (full-state) polled grace periods.
 if (cur_ops->poll_active > 0) {
  ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
  if (!WARN_ON(!ulo))
   ulo_size = cur_ops->poll_active;
 }
 if (cur_ops->poll_active_full > 0) {
  rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
  if (!WARN_ON(!rgo))
   rgo_size = cur_ops->poll_active_full;
 }

 // Each pass substitutes a fresh structure for rcu_torture_current and
 // retires the old one via a randomly chosen grace-period primitive.
 do {
  rcu_torture_writer_state = RTWS_FIXED_DELAY;
  torture_hrtimeout_us(500, 1000, &rand);
  rp = rcu_torture_alloc();
  if (rp == NULL)
   continue;
  rp->rtort_pipe_count = 0;
  ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
  rcu_torture_writer_state = RTWS_DELAY;
  udelay(torture_random(&rand) & 0x3ff);
  rcu_torture_writer_state = RTWS_REPLACE;
  old_rp = rcu_dereference_check(rcu_torture_current,
            current == writer_task);
  rp->rtort_mbtest = 1;
  rcu_assign_pointer(rcu_torture_current, rp);
  smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
  if (old_rp) {
   i = old_rp->rtort_pipe_count;
   if (i > RCU_TORTURE_PIPE_LEN)
    i = RCU_TORTURE_PIPE_LEN;
   atomic_inc(&rcu_torture_wcount[i]);
   WRITE_ONCE(old_rp->rtort_pipe_count,
       old_rp->rtort_pipe_count + 1);
   ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);

   // Make sure readers block polled grace periods.
   if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
    idx = cur_ops->readlock();
    cookie = cur_ops->get_gp_state();
    // A cookie taken inside a reader must not expire
    // while that reader is still running.
    WARN_ONCE(cur_ops->poll_gp_state(cookie),
       "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
       __func__,
       rcu_torture_writer_state_getname(),
       rcu_torture_writer_state,
       cookie, cur_ops->get_gp_state());
    if (cur_ops->get_comp_state) {
     cookie = cur_ops->get_comp_state();
     WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
    }
    cur_ops->readunlock(idx);
   }
   if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
    idx = cur_ops->readlock();
    cur_ops->get_gp_state_full(&cookie_full);
    WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
       "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
       __func__,
       rcu_torture_writer_state_getname(),
       rcu_torture_writer_state,
       cpumask_pr_args(cpu_online_mask));
    if (cur_ops->get_comp_state_full) {
     cur_ops->get_comp_state_full(&cookie_full);
     WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
    }
    cur_ops->readunlock(idx);
   }
   // Retire old_rp using a randomly chosen update primitive.
   switch (synctype[torture_random(&rand) % nsynctypes]) {
   case RTWS_DEF_FREE:
    rcu_torture_writer_state = RTWS_DEF_FREE;
    cur_ops->deferred_free(old_rp);
    break;
   case RTWS_EXP_SYNC:
    rcu_torture_writer_state = RTWS_EXP_SYNC;
    do_rtws_sync(&rand, cur_ops->exp_sync);
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_COND_GET:
    rcu_torture_writer_state = RTWS_COND_GET;
    gp_snap = cur_ops->get_gp_state();
    torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
           1000, &rand);
    rcu_torture_writer_state = RTWS_COND_SYNC;
    cur_ops->cond_sync(gp_snap);
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_COND_GET_EXP:
    rcu_torture_writer_state = RTWS_COND_GET_EXP;
    gp_snap = cur_ops->get_gp_state_exp();
    torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
           1000, &rand);
    rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
    cur_ops->cond_sync_exp(gp_snap);
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_COND_GET_FULL:
    rcu_torture_writer_state = RTWS_COND_GET_FULL;
    cur_ops->get_gp_state_full(&gp_snap_full);
    torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
           1000, &rand);
    rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
    cur_ops->cond_sync_full(&gp_snap_full);
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_COND_GET_EXP_FULL:
    rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
    cur_ops->get_gp_state_full(&gp_snap_full);
    torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
           1000, &rand);
    rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
    cur_ops->cond_sync_exp_full(&gp_snap_full);
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_POLL_GET:
    rcu_torture_writer_state = RTWS_POLL_GET;
    for (i = 0; i < ulo_size; i++)
     ulo[i] = cur_ops->get_comp_state();
    gp_snap = cur_ops->start_gp_poll();
    rcu_torture_writer_state = RTWS_POLL_WAIT;
    while (!cur_ops->poll_gp_state(gp_snap)) {
     gp_snap1 = cur_ops->get_gp_state();
     // Recycle any expired or duplicate cookie slot;
     // at least one slot must always be reusable.
     for (i = 0; i < ulo_size; i++)
      if (cur_ops->poll_gp_state(ulo[i]) ||
          cur_ops->same_gp_state(ulo[i], gp_snap1)) {
       ulo[i] = gp_snap1;
       break;
      }
     WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
     torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
            1000, &rand);
    }
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_POLL_GET_FULL:
    rcu_torture_writer_state = RTWS_POLL_GET_FULL;
    for (i = 0; i < rgo_size; i++)
     cur_ops->get_comp_state_full(&rgo[i]);
    cur_ops->start_gp_poll_full(&gp_snap_full);
    rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
    while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
     cur_ops->get_gp_state_full(&gp_snap1_full);
     for (i = 0; i < rgo_size; i++)
      if (cur_ops->poll_gp_state_full(&rgo[i]) ||
          cur_ops->same_gp_state_full(&rgo[i],
          &gp_snap1_full)) {
       rgo[i] = gp_snap1_full;
       break;
      }
     WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
     torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
            1000, &rand);
    }
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_POLL_GET_EXP:
    rcu_torture_writer_state = RTWS_POLL_GET_EXP;
    gp_snap = cur_ops->start_gp_poll_exp();
    rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
    while (!cur_ops->poll_gp_state_exp(gp_snap))
     torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
            1000, &rand);
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_POLL_GET_EXP_FULL:
    rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
    cur_ops->start_gp_poll_exp_full(&gp_snap_full);
    rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
    while (!cur_ops->poll_gp_state_full(&gp_snap_full))
     torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
            1000, &rand);
    rcu_torture_pipe_update(old_rp);
    break;
   case RTWS_SYNC:
    rcu_torture_writer_state = RTWS_SYNC;
    do_rtws_sync(&rand, cur_ops->sync);
    rcu_torture_pipe_update(old_rp);
    break;
   default:
    WARN_ON_ONCE(1);
    break;
   }
  }
  WRITE_ONCE(rcu_torture_current_version,
      rcu_torture_current_version + 1);
  /* Cycle through nesting levels of rcu_expedite_gp() calls. */
  if (can_expedite &&
      !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
   WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
   if (expediting >= 0)
    rcu_expedite_gp();
   else
    rcu_unexpedite_gp();
   if (++expediting > 3)
    expediting = -expediting;
  } else if (!can_expedite) { /* Disabled during boot, recheck. */
   can_expedite = !rcu_gp_is_expedited() &&
           !rcu_gp_is_normal();
  }
  rcu_torture_writer_state = RTWS_STUTTER;
  boot_ended = rcu_inkernel_boot_has_ended();
  stutter_waited = stutter_wait("rcu_torture_writer");
  // After a stutter pause with no expected slowdowns in effect,
  // any structure still unfreed indicates a too-slow grace period.
  if (stutter_waited &&
      !atomic_read(&rcu_fwd_cb_nodelay) &&
      !cur_ops->slow_gps &&
      !torture_must_stop() &&
      boot_ended &&
      time_after(jiffies, stallsdone))
   for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
    if (list_empty(&rcu_tortures[i].rtort_free) &&
        rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
     tracing_off();
     if (cur_ops->gp_kthread_dbg)
      cur_ops->gp_kthread_dbg();
     WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
     rcu_ftrace_dump(DUMP_ALL);
     break;
    }
  if (stutter_waited)
   sched_set_normal(current, oldnice);
 } while (!torture_must_stop());
 rcu_torture_current = NULL;  // Let stats task know that we are done.
 /* Reset expediting back to unexpedited. */
 if (expediting > 0)
  expediting = -expediting;
 while (can_expedite && expediting++ < 0)
  rcu_unexpedite_gp();
 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
 if (!can_expedite)
  pr_alert("%s" TORTURE_FLAG
    " Dynamic grace-period expediting was disabled.\n",
    torture_type);
 kfree(ulo);
 kfree(rgo);
 rcu_torture_writer_state = RTWS_STOPPING;
 torture_kthread_stopping("rcu_torture_writer");
 return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */

static int
rcu_torture_fakewriter(void *arg)
{
 unsigned long gp_snap;
 struct rcu_gp_oldstate gp_snap_full;
 DEFINE_TORTURE_RANDOM(rand);

 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
 set_user_nice(current, MAX_NICE);

 if (WARN_ONCE(nsynctypes == 0,
        "%s: No update-side primitives.\n", __func__)) {
  /*
   * No updates primitives, so don't try updating.
   * The resulting test won't be testing much, hence the
   * above WARN_ONCE().
   */

  torture_kthread_stopping("rcu_torture_fakewriter");
  return 0;
 }

 // Each pass invokes either the callback barrier (rarely) or one
 // randomly chosen grace-period primitive, without touching
 // rcu_torture_current.
 do {
  torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
  if (cur_ops->cb_barrier != NULL &&
      torture_random(&rand) % (nrealfakewriters * 8) == 0) {
   cur_ops->cb_barrier();
  } else {
   switch (synctype[torture_random(&rand) % nsynctypes]) {
   case RTWS_DEF_FREE:
    // Deferred free has nothing to free here; skip.
    break;
   case RTWS_EXP_SYNC:
    cur_ops->exp_sync();
    break;
   case RTWS_COND_GET:
    gp_snap = cur_ops->get_gp_state();
    torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
    cur_ops->cond_sync(gp_snap);
    break;
   case RTWS_COND_GET_EXP:
    gp_snap = cur_ops->get_gp_state_exp();
    torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
    cur_ops->cond_sync_exp(gp_snap);
    break;
   case RTWS_COND_GET_FULL:
    cur_ops->get_gp_state_full(&gp_snap_full);
    torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
    cur_ops->cond_sync_full(&gp_snap_full);
    break;
   case RTWS_COND_GET_EXP_FULL:
    cur_ops->get_gp_state_full(&gp_snap_full);
    torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
    cur_ops->cond_sync_exp_full(&gp_snap_full);
    break;
   case RTWS_POLL_GET:
    // Optionally start the poll with interrupts disabled.
    if (cur_ops->start_poll_irqsoff)
     local_irq_disable();
    gp_snap = cur_ops->start_gp_poll();
    if (cur_ops->start_poll_irqsoff)
     local_irq_enable();
    while (!cur_ops->poll_gp_state(gp_snap)) {
     torture_hrtimeout_jiffies(torture_random(&rand) % 16,
          &rand);
    }
    break;
   case RTWS_POLL_GET_FULL:
    if (cur_ops->start_poll_irqsoff)
     local_irq_disable();
    cur_ops->start_gp_poll_full(&gp_snap_full);
    if (cur_ops->start_poll_irqsoff)
     local_irq_enable();
    while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
     torture_hrtimeout_jiffies(torture_random(&rand) % 16,
          &rand);
    }
    break;
   case RTWS_POLL_GET_EXP:
    gp_snap = cur_ops->start_gp_poll_exp();
    while (!cur_ops->poll_gp_state_exp(gp_snap)) {
     torture_hrtimeout_jiffies(torture_random(&rand) % 16,
          &rand);
    }
    break;
   case RTWS_POLL_GET_EXP_FULL:
    cur_ops->start_gp_poll_exp_full(&gp_snap_full);
    while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
     torture_hrtimeout_jiffies(torture_random(&rand) % 16,
          &rand);
    }
    break;
   case RTWS_SYNC:
    cur_ops->sync();
    break;
   default:
    WARN_ON_ONCE(1);
    break;
   }
  }
  stutter_wait("rcu_torture_fakewriter");
 } while (!torture_must_stop());

 torture_kthread_stopping("rcu_torture_fakewriter");
 return 0;
}

// RCU callback that simply frees its rcu_head (which is the allocation
// itself, so no container_of() is needed).
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
 kfree(rhp);
}

// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
     struct torture_random_state *trsp)
{
 unsigned long loops;
 int noc = torture_num_online_cpus();
 int rdrchked;
 int rdrchker;
 struct rcu_torture_reader_check *rtrcp; // Me.
 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

 if (myid < 0)
  return// Don't try this from timer handlers.

 // Increment my counter.
 rtrcp = &rcu_torture_reader_mbchk[myid];
 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

 // Attempt to assign someone else some checking work.
 rdrchked = torture_random(trsp) % nrealreaders;
 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
 rdrchker = torture_random(trsp) % nrealreaders;
 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
     smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
     !READ_ONCE(rtp->rtort_chkp) &&
     !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
  rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
  WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
  rtrcp->rtc_chkrdr = rdrchked;
  WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
  if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
      cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=95 H=92 G=93

¤ Dauer der Verarbeitung: 0.20 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.