author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2020-05-27 22:11:14 +0200
committer	Ingo Molnar <mingo@kernel.org>				2020-05-28 10:31:09 +0200
commit		cfa6705d89b6562f79c40c249f8d94073c4276e4
tree		5279a46bfde01387460f5f23756435c7ad8a2a2f
parent		locking: Introduce local_lock()
radix-tree: Use local_lock for protection
The radix-tree and idr preload mechanisms use preempt_disable() to protect
the complete operation between xxx_preload() and xxx_preload_end().

The code inside the preempt-disabled section acquires regular spinlocks,
which are converted to 'sleeping' spinlocks on a PREEMPT_RT kernel, and
eventually calls into a memory allocator. Both conflict with the RT
semantics of a preemption-disabled region.

Convert the preload protection to a local_lock, which allows RT kernels to
substitute it with a real per-CPU lock. On non-RT kernels this maps to
preempt_disable() as before, but additionally provides lockdep coverage of
the critical region.
No functional change.
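
For context, the conversion follows the generic local_lock pattern sketched
below. This is a minimal illustration, not code from this patch: the
foo_pcpu structure, foo_update() and the counter field are hypothetical
names; the real definitions are in the diff further down.

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	/* Hypothetical per-CPU state guarded by a local_lock. */
	struct foo_pcpu {
		local_lock_t lock;
		int counter;
	};

	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_update(void)
	{
		/*
		 * On !PREEMPT_RT this compiles down to preempt_disable();
		 * on PREEMPT_RT it takes a per-CPU spinlock. lockdep
		 * tracks the critical section in both configurations.
		 */
		local_lock(&foo_pcpu.lock);
		this_cpu_inc(foo_pcpu.counter);
		local_unlock(&foo_pcpu.lock);
	}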
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200527201119.1692513-3-bigeasy@linutronix.de
-rw-r--r--	include/linux/idr.h        |  2 +-
-rw-r--r--	include/linux/radix-tree.h | 11 ++++++++++-
-rw-r--r--	lib/radix-tree.c           | 20 +++++++++-----------
3 files changed, 20 insertions(+), 13 deletions(-)
diff --git a/include/linux/idr.h b/include/linux/idr.h
index ac6e946b6767..3ade03e5c7af 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -171,7 +171,7 @@ static inline bool idr_is_empty(const struct idr *idr)
  */
 static inline void idr_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 /**
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 63e62372443a..c2a9f7c90727 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -16,11 +16,20 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/xarray.h>
+#include <linux/local_lock.h>
 
 /* Keep unconverted code working */
 #define radix_tree_root		xarray
 #define radix_tree_node		xa_node
 
+struct radix_tree_preload {
+	local_lock_t lock;
+	unsigned nr;
+	/* nodes->parent points to next preallocated node */
+	struct radix_tree_node *nodes;
+};
+DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
  * slot are interpreted:
@@ -245,7 +254,7 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
 
 static inline void radix_tree_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 void __rcu **idr_get_free(struct radix_tree_root *root,
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 2ee6ae3b0ade..34e406fe561f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/percpu.h>
+#include <linux/local_lock.h>
 #include <linux/preempt.h>		/* in_interrupt() */
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
@@ -27,7 +28,6 @@
 #include <linux/string.h>
 #include <linux/xarray.h>
 
-
 /*
  * Radix tree node cache.
  */
@@ -58,12 +58,10 @@ struct kmem_cache *radix_tree_node_cachep;
 /*
  * Per-cpu pool of preloaded nodes
  */
-struct radix_tree_preload {
-	unsigned nr;
-	/* nodes->parent points to next preallocated node */
-	struct radix_tree_node *nodes;
+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
+	.lock = INIT_LOCAL_LOCK(lock),
 };
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
 
 static inline struct radix_tree_node *entry_to_node(void *ptr)
 {
@@ -332,14 +330,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 	 */
 	gfp_mask &= ~__GFP_ACCOUNT;
 
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
-		preempt_enable();
+		local_unlock(&radix_tree_preloads.lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
 			node->parent = rtp->nodes;
@@ -381,7 +379,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	if (gfpflags_allow_blocking(gfp_mask))
 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -1470,7 +1468,7 @@ EXPORT_SYMBOL(radix_tree_tagged);
 void idr_preload(gfp_t gfp_mask)
 {
 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 }
 EXPORT_SYMBOL(idr_preload);
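
For completeness, a typical caller of the converted API looks like the
sketch below; foo_idr, foo_lock and foo_alloc_id() are hypothetical names
used only for illustration, not code from this patch. Note in the diff
above that __radix_tree_preload() drops the local_lock around
kmem_cache_alloc() and re-takes it afterwards, so the possibly sleeping
allocation never happens inside the critical section.

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(foo_idr);
	static DEFINE_SPINLOCK(foo_lock);

	static int foo_alloc_id(void *ptr)
	{
		int id;

		/* Takes radix_tree_preloads.lock (preempt_disable() on !RT). */
		idr_preload(GFP_KERNEL);
		spin_lock(&foo_lock);
		/* GFP_NOWAIT: the preloaded nodes cover the allocation. */
		id = idr_alloc(&foo_idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(&foo_lock);
		/* Drops the lock taken by idr_preload(). */
		idr_preload_end();

		return id;
	}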