author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-07-19 22:36:00 +0200
---|---|---
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-08-29 18:20:48 +0200
commit | 9fdcb9afe082794c6dcf2b79b3070ef5dafc8a8f | (patch)
tree | 30e1bacbfec890edf350e06843c2927ad06c49d2 | /kernel/rcu
parent | rcutorture: Vary forward-progress test interval | (diff)
download | linux-9fdcb9afe082794c6dcf2b79b3070ef5dafc8a8f.tar.xz | linux-9fdcb9afe082794c6dcf2b79b3070ef5dafc8a8f.zip
rcutorture: Add self-propagating callback to forward-progress testing
If rcutorture is run on a quiet system with the rcutorture.stutter module
parameter set high, then there can legitimately be an extended period
during which no RCU forward progress takes place. This can result
in false-positive no-forward-progress splats. This commit therefore
makes rcu_torture_fwd_prog() create a self-propagating RCU callback
to ensure that grace periods are in progress for the duration of the
forward-progress test.
Note that the RCU flavor under test must define ->call(), ->sync(),
and ->cb_barrier() for this self-propagating callback to be created.
If one or more of those rcu_torture_ops fields are NULL, then the
rcu_torture_fwd_prog() function will silently proceed without creating
the self-propagating callback.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
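
For context, here is a minimal, self-contained sketch of the self-propagating-callback pattern described above, written against the core call_rcu()/synchronize_rcu()/rcu_barrier() API rather than the cur_ops indirection used by rcutorture. The selfprop_* names and the standalone helpers are illustrative only and are not part of this patch.

```c
/*
 * Illustrative sketch only; not part of the patch below.  A callback
 * that re-posts itself keeps RCU grace periods continuously in flight
 * until a stop flag is observed.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>

struct selfprop_state {
	struct rcu_head rh;
	int stop;	/* 0: keep going, 1: stop requested, 2: stop acknowledged. */
};

/* Invoked after a grace period elapses; re-posts itself to start another. */
static void selfprop_cb(struct rcu_head *rhp)
{
	struct selfprop_state *sps = container_of(rhp, struct selfprop_state, rh);

	if (READ_ONCE(sps->stop)) {
		WRITE_ONCE(sps->stop, 2);	/* Acknowledge, do not re-post. */
		return;
	}
	call_rcu(&sps->rh, selfprop_cb);	/* Start the next grace period. */
}

static void selfprop_start(struct selfprop_state *sps)
{
	WRITE_ONCE(sps->stop, 0);
	init_rcu_head_on_stack(&sps->rh);
	call_rcu(&sps->rh, selfprop_cb);
}

static void selfprop_stop(struct selfprop_state *sps)
{
	WRITE_ONCE(sps->stop, 1);
	synchronize_rcu();	/* Let an in-flight invocation finish. */
	rcu_barrier();		/* Wait for the final queued callback. */
	WARN_ON(READ_ONCE(sps->stop) != 2);
	destroy_rcu_head_on_stack(&sps->rh);
}
```

The stop handshake mirrors the patch: the requester sets the flag, waits out any in-flight invocation with a grace-period wait, drains the last queued instance with the callback barrier, and finally checks that the callback acknowledged the request.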
Diffstat (limited to 'kernel/rcu')
-rw-r--r-- | kernel/rcu/rcutorture.c | 36 |
1 file changed, 36 insertions, 0 deletions
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 89cc4d9c9a0c..316083687fd7 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1670,20 +1670,49 @@ static int __init rcu_torture_stall_init(void)
 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
 }
 
+/* State structure for forward-progress self-propagating RCU callback. */
+struct fwd_cb_state {
+	struct rcu_head rh;
+	int stop;
+};
+
+/*
+ * Forward-progress self-propagating RCU callback function.  Because
+ * callbacks run from softirq, this function is an implicit RCU read-side
+ * critical section.
+ */
+static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
+{
+	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
+
+	if (READ_ONCE(fcsp->stop)) {
+		WRITE_ONCE(fcsp->stop, 2);
+		return;
+	}
+	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
+}
+
 /* Carry out grace-period forward-progress testing. */
 static int rcu_torture_fwd_prog(void *args)
 {
 	unsigned long cver;
+	struct fwd_cb_state fcs = { .stop = 0 };
 	unsigned long gps;
 	int idx;
 	int sd;
 	int sd4;
+	bool selfpropcb = false;
 	unsigned long stopat;
 	bool tested = false;
 	int tested_tries = 0;
 	static DEFINE_TORTURE_RANDOM(trs);
 
 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
+	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
+		init_rcu_head_on_stack(&fcs.rh);
+		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
+		selfpropcb = true;
+	}
 	do {
 		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
 		cver = READ_ONCE(rcu_torture_current_version);
@@ -1708,6 +1737,13 @@ static int rcu_torture_fwd_prog(void *args)
 		/* Avoid slow periods, better to test when busy. */
 		stutter_wait("rcu_torture_fwd_prog");
 	} while (!torture_must_stop());
+	if (selfpropcb) {
+		WRITE_ONCE(fcs.stop, 1);
+		cur_ops->sync(); /* Wait for running callback to complete. */
+		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
+		WARN_ON(READ_ONCE(fcs.stop) != 2);
+		destroy_rcu_head_on_stack(&fcs.rh);
+	}
 	/* Short runs might not contain a valid forward-progress attempt. */
 	WARN_ON(!tested && tested_tries >= 5);
 	torture_kthread_stopping("rcu_torture_fwd_prog");
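
The ->call(), ->sync(), and ->cb_barrier() methods checked above are supplied per RCU flavor through struct rcu_torture_ops. As a rough, abbreviated illustration (the real structure has many more fields and its contents vary across kernel versions), the classic RCU flavor wires them to the core API along these lines:

```c
/* Abbreviated and illustrative; not the complete rcu_torture_ops definition. */
static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.call		= call_rcu,
	.sync		= synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.name		= "rcu",
};
```

A flavor that leaves any of these three pointers NULL simply runs rcu_torture_fwd_prog() without the self-propagating callback, which is the quiet fallback called out in the commit message.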