author | Davidlohr Bueso <dave@stgolabs.net> | 2014-09-12 05:40:18 +0200
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-09-16 22:40:59 +0200
commit | 42ddc75ddd478edac6ad9dc8c63abb4441541af2
tree | 4fe354b785d0d575bcd47af7dc8d9edb299f78f7 /kernel/locking/locktorture.c
parent | locktorture: Add documentation
locktorture: Support mutexes
Add a "mutex_lock" torture test. The main difference with the already
existing spinlock tests is that the latency of the critical region
is much larger. We randomly delay for (arbitrarily) either 500 ms or,
otherwise, 25 ms. While this can considerably reduce the amount of
writes compared to non blocking locks, if run long enough it can have
the same torturous effect. Furthermore it is more representative of
mutex hold times and can stress better things like thrashing.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
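
To make the hold-time scheme above concrete, here is a minimal user-space
sketch of the same idea, under assumed POSIX-threads stand-ins:
pthread_mutex_t, rand_r() and usleep() take the place of the kernel's
torture mutex, torture_random() and mdelay(), and the demo_* names and
constants merely echo the commit message rather than the exact kernel code.

/*
 * Hypothetical user-space analogue of the randomized mutex hold times.
 * Build with: cc -O2 demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NWRITERS	4

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

/* One writer: acquire, hold for a randomized (mostly short) time, release. */
static void *demo_writer(void *arg)
{
	unsigned int seed = (unsigned int)(uintptr_t)arg;
	int i;

	for (i = 0; i < 100; i++) {
		pthread_mutex_lock(&demo_mutex);
		/* Rarely hold the lock for a long time to force contention. */
		if (!(rand_r(&seed) % (NWRITERS * 2000)))
			usleep(500 * 1000);	/* rare long hold: ~500 ms */
		else
			usleep(20 * 1000);	/* common short hold: ~20 ms */
		pthread_mutex_unlock(&demo_mutex);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NWRITERS];
	int i;

	for (i = 0; i < NWRITERS; i++)
		pthread_create(&tid[i], NULL, demo_writer,
			       (void *)(uintptr_t)(i + 1));
	for (i = 0; i < NWRITERS; i++)
		pthread_join(tid[i], NULL);
	printf("all writers done\n");
	return 0;
}

With several writers and a mostly short hold, contention is constant but
forward progress continues; the occasional long hold is what produces the
bursts of massive contention the commit message refers to.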
Diffstat (limited to 'kernel/locking/locktorture.c')
-rw-r--r-- | kernel/locking/locktorture.c | 41
1 file changed, 39 insertions, 2 deletions
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8c770b2c6e2a..414ba45d580f 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -27,6 +27,7 @@
 #include <linux/kthread.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
@@ -66,7 +67,7 @@ torture_param(bool, verbose, true,
 static char *torture_type = "spin_lock";
 module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type,
-		 "Type of lock to torture (spin_lock, spin_lock_irq, ...)");
+		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
 
 static atomic_t n_lock_torture_errors;
 
@@ -206,6 +207,42 @@ static struct lock_torture_ops spin_lock_irq_ops = {
 	.name		= "spin_lock_irq"
 };
 
+static DEFINE_MUTEX(torture_mutex);
+
+static int torture_mutex_lock(void) __acquires(torture_mutex)
+{
+	mutex_lock(&torture_mutex);
+	return 0;
+}
+
+static void torture_mutex_delay(struct torture_random_state *trsp)
+{
+	const unsigned long longdelay_ms = 100;
+
+	/* We want a long delay occasionally to force massive contention. */
+	if (!(torture_random(trsp) %
+	      (nrealwriters_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms * 5);
+	else
+		mdelay(longdelay_ms / 5);
+#ifdef CONFIG_PREEMPT
+	if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+		preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_mutex_unlock(void) __releases(torture_mutex)
+{
+	mutex_unlock(&torture_mutex);
+}
+
+static struct lock_torture_ops mutex_lock_ops = {
+	.writelock	= torture_mutex_lock,
+	.write_delay	= torture_mutex_delay,
+	.writeunlock	= torture_mutex_unlock,
+	.name		= "mutex_lock"
+};
+
 /*
  * Lock torture writer kthread.  Repeatedly acquires and releases
  * the lock, checking for duplicate acquisitions.
@@ -352,7 +389,7 @@ static int __init lock_torture_init(void)
 	int i;
 	int firsterr = 0;
 	static struct lock_torture_ops *torture_ops[] = {
-		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
+		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops, &mutex_lock_ops,
 	};
 
 	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
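
For context on how the new ops table is consumed, here is a hedged
user-space sketch of the plug-in pattern the diff relies on: a struct of
writelock/write_delay/writeunlock callbacks selected by name and driven
by a simple acquire/delay/release loop. The names below (demo_lock_ops,
demo_writer_loop, and friends) are illustrative stand-ins, not the
kernel's lock_torture_ops or its writer kthread, which also perform
torture-framework bookkeeping and error accounting not shown here.

#include <pthread.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>

/* Mirrors the shape of the kernel's ops table: lock, delay, unlock, name. */
struct demo_lock_ops {
	int (*writelock)(void);
	void (*write_delay)(void);
	void (*writeunlock)(void);
	const char *name;
};

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

static int demo_mutex_writelock(void)
{
	pthread_mutex_lock(&demo_mutex);
	return 0;
}

static void demo_mutex_write_delay(void)
{
	usleep(20 * 1000);	/* the real test mixes in rare long holds */
}

static void demo_mutex_writeunlock(void)
{
	pthread_mutex_unlock(&demo_mutex);
}

static const struct demo_lock_ops demo_mutex_ops = {
	.writelock	= demo_mutex_writelock,
	.write_delay	= demo_mutex_write_delay,
	.writeunlock	= demo_mutex_writeunlock,
	.name		= "mutex_lock",
};

static const struct demo_lock_ops *demo_ops[] = { &demo_mutex_ops };

/* Pick an ops table by name, then acquire/delay/release in a loop. */
static int demo_writer_loop(const char *type, int iters)
{
	const struct demo_lock_ops *ops = NULL;
	size_t i;

	for (i = 0; i < sizeof(demo_ops) / sizeof(demo_ops[0]); i++)
		if (!strcmp(demo_ops[i]->name, type))
			ops = demo_ops[i];
	if (!ops)
		return -1;

	while (iters-- > 0) {
		ops->writelock();
		ops->write_delay();
		ops->writeunlock();
	}
	return 0;
}

int main(void)
{
	return demo_writer_loop("mutex_lock", 10);
}

Adding another lock flavor then amounts to defining one more ops
structure and listing it in the array, which matches the shape of the
one-line change to torture_ops[] in lock_torture_init() above.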