From 91abab83839aa2eba073e4a63c729832fdb27ea1 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Mon, 1 Jul 2019 17:03:29 -0400
Subject: XArray: Fix xas_next() with a single entry at 0

If there is only a single entry at 0, the first time we call xas_next(),
we return the entry. Unfortunately, all subsequent times we call
xas_next(), we also return the entry at 0 instead of noticing that the
xa_index is now greater than zero. This broke find_get_pages_contig().

Fixes: 64d3e9a9e0cc ("xarray: Step through an XArray")
Reported-by: Kent Overstreet
Signed-off-by: Matthew Wilcox (Oracle)
---
 lib/test_xarray.c | 24 ++++++++++++++++++++++++
 lib/xarray.c      |  4 ++++
 2 files changed, 28 insertions(+)

(limited to 'lib')

diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 9d631a7b6a70..7df4f7f395bf 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1110,6 +1110,28 @@ static noinline void check_find_entry(struct xarray *xa)
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_move_tiny(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 0);
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+	rcu_read_lock();
+	XA_BUG_ON(xa, xas_next(&xas) != NULL);
+	XA_BUG_ON(xa, xas_next(&xas) != NULL);
+	rcu_read_unlock();
+	xa_store_index(xa, 0, GFP_KERNEL);
+	rcu_read_lock();
+	xas_set(&xas, 0);
+	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
+	XA_BUG_ON(xa, xas_next(&xas) != NULL);
+	xas_set(&xas, 0);
+	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
+	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+	rcu_read_unlock();
+	xa_erase_index(xa, 0);
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 {
 	XA_STATE(xas, xa, 0);
@@ -1217,6 +1239,8 @@ static noinline void check_move(struct xarray *xa)
 
 	xa_destroy(xa);
 
+	check_move_tiny(xa);
+
 	for (i = 0; i < 16; i++)
 		check_move_small(xa, 1UL << i);
 
diff --git a/lib/xarray.c b/lib/xarray.c
index 446b956c9188..1237c213f52b 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -994,6 +994,8 @@ void *__xas_prev(struct xa_state *xas)
 	if (!xas_frozen(xas->xa_node))
 		xas->xa_index--;
+	if (!xas->xa_node)
+		return set_bounds(xas);
 	if (xas_not_node(xas->xa_node))
 		return xas_load(xas);
 
@@ -1031,6 +1033,8 @@ void *__xas_next(struct xa_state *xas)
 	if (!xas_frozen(xas->xa_node))
 		xas->xa_index++;
+	if (!xas->xa_node)
+		return set_bounds(xas);
 	if (xas_not_node(xas->xa_node))
 		return xas_load(xas);
-- 
cgit v1.2.3
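For reference, a minimal sketch of the caller-visible behaviour this patch
repairs, mirroring the check_move_tiny() test above (not part of the patch;
the function name demo_single_entry and the stored value are illustrative):

#include <linux/xarray.h>

/* Sketch: step through an XArray holding a single entry at index 0. */
static void demo_single_entry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store(xa, 0, xa_mk_value(42), GFP_KERNEL);

	rcu_read_lock();
	xas_set(&xas, 0);
	entry = xas_next(&xas);	/* returns the entry at index 0 */
	/*
	 * Before the fix, this second call returned the index-0 entry
	 * again; with the fix it returns NULL, since xa_index is now 1.
	 */
	entry = xas_next(&xas);
	rcu_read_unlock();
}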
From 5a74ac4c4a97bd8b7dba054304d598e2a882fea6 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 1 Nov 2019 21:36:39 -0400
Subject: idr: Fix idr_get_next_ul race with idr_remove

Commit 5c089fd0c734 ("idr: Fix idr_get_next race with idr_remove")
neglected to fix idr_get_next_ul(). As far as I can tell, nobody's
actually using this interface under the RCU read lock, but fix it now
before anybody decides to use it.

Fixes: 5c089fd0c734 ("idr: Fix idr_get_next race with idr_remove")
Signed-off-by: Matthew Wilcox (Oracle)
---
 lib/idr.c | 31 +++++++++++--------------------
 1 file changed, 11 insertions(+), 20 deletions(-)

(limited to 'lib')

diff --git a/lib/idr.c b/lib/idr.c
index 66a374892482..c2cf2c52bbde 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -215,7 +215,7 @@ int idr_for_each(const struct idr *idr,
 EXPORT_SYMBOL(idr_for_each);
 
 /**
- * idr_get_next() - Find next populated entry.
+ * idr_get_next_ul() - Find next populated entry.
  * @idr: IDR handle.
  * @nextid: Pointer to an ID.
  *
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(idr_for_each);
  * to the ID of the found value. To use in a loop, the value pointed to by
  * nextid must be incremented by the user.
  */
-void *idr_get_next(struct idr *idr, int *nextid)
+void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
 {
 	struct radix_tree_iter iter;
 	void __rcu **slot;
@@ -245,18 +245,14 @@ void *idr_get_next(struct idr *idr, int *nextid)
 	}
 	if (!slot)
 		return NULL;
-	id = iter.index + base;
-
-	if (WARN_ON_ONCE(id > INT_MAX))
-		return NULL;
-	*nextid = id;
+	*nextid = iter.index + base;
 	return entry;
 }
-EXPORT_SYMBOL(idr_get_next);
+EXPORT_SYMBOL(idr_get_next_ul);
 
 /**
- * idr_get_next_ul() - Find next populated entry.
+ * idr_get_next() - Find next populated entry.
  * @idr: IDR handle.
  * @nextid: Pointer to an ID.
  *
@@ -265,22 +261,17 @@ EXPORT_SYMBOL(idr_get_next);
  * to the ID of the found value. To use in a loop, the value pointed to by
  * nextid must be incremented by the user.
  */
-void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
+void *idr_get_next(struct idr *idr, int *nextid)
 {
-	struct radix_tree_iter iter;
-	void __rcu **slot;
-	unsigned long base = idr->idr_base;
 	unsigned long id = *nextid;
+	void *entry = idr_get_next_ul(idr, &id);
 
-	id = (id < base) ? 0 : id - base;
-	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
-	if (!slot)
+	if (WARN_ON_ONCE(id > INT_MAX))
 		return NULL;
-
-	*nextid = iter.index + base;
-	return rcu_dereference_raw(*slot);
+	*nextid = id;
+	return entry;
 }
-EXPORT_SYMBOL(idr_get_next_ul);
+EXPORT_SYMBOL(idr_get_next);
 
 /**
  * idr_replace() - replace pointer for given ID.
-- 
cgit v1.2.3

From b7e9728f3d7fc5c5c8508d99f1675212af5cfd49 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Sat, 2 Nov 2019 00:25:08 -0400
Subject: idr: Fix idr_alloc_u32 on 32-bit systems

Attempting to allocate an entry at 0xffffffff when one is already
present would succeed in allocating one at 2^32, which would confuse
everything. Return -ENOSPC in this case, as expected.

Signed-off-by: Matthew Wilcox (Oracle)
---
 lib/radix-tree.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 18c1dfbb1765..c8fa1d274530 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1529,7 +1529,7 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
 			offset = radix_tree_find_next_bit(node, IDR_FREE,
 							offset + 1);
 		start = next_index(start, node, offset);
-		if (start > max)
+		if (start > max || start == 0)
 			return ERR_PTR(-ENOSPC);
 		while (offset == RADIX_TREE_MAP_SIZE) {
 			offset = node->offset + 1;
-- 
cgit v1.2.3
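The kernel-doc in the idr_get_next_ul() patch above prescribes the iteration
pattern: the caller must advance the ID past each found entry. A sketch of
such a loop (the function name demo_idr_walk is illustrative):

#include <linux/idr.h>

/* Sketch: visit every populated entry in an IDR, lowest ID first. */
static void demo_idr_walk(struct idr *idr)
{
	unsigned long id = 0;
	void *entry;

	while ((entry = idr_get_next_ul(idr, &id)) != NULL) {
		/* ... use entry; its ID is now stored in id ... */
		id++;	/* advance past the found ID, per the kernel-doc */
	}
}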
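The idr_alloc_u32() fix above is visible from the API. A hedged sketch of the
case it repairs, assuming the usual allocation pattern (demo_idr_top is an
illustrative name, and the exact wrap-around behaviour on an unfixed 32-bit
kernel is as the commit message describes):

#include <linux/idr.h>

/* Sketch: the top of the u32 ID space must not wrap past 0xffffffff. */
static int demo_idr_top(struct idr *idr, void *ptr)
{
	u32 id = 0xffffffff;
	int err;

	err = idr_alloc_u32(idr, ptr, &id, 0xffffffff, GFP_KERNEL);
	if (err)
		return err;	/* first allocation takes 0xffffffff */

	id = 0xffffffff;
	/*
	 * With the fix, this returns -ENOSPC; before it, a 32-bit kernel
	 * could wrongly succeed with a wrapped-around index.
	 */
	return idr_alloc_u32(idr, ptr, &id, 0xffffffff, GFP_KERNEL);
}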
From 5cbf2fff3bba8d3c6a4d47c1754de1cf57e2b01f Mon Sep 17 00:00:00 2001
From: Kevin Hao
Date: Tue, 5 Nov 2019 21:16:57 -0800
Subject: dump_stack: avoid the livelock of the dump_lock

In the current code, we use atomic_cmpxchg() to serialize the output of
dump_stack(), but this implementation suffers from the thundering herd
problem. We have observed this kind of livelock on a Marvell cn96xx
board (24 CPUs) when heavily using dump_stack() in a kprobe handler.
Instead, let the competitors wait for the lock to be released before
retrying the atomic_cmpxchg(); this mitigates the thundering herd
problem. Thanks to Linus for the suggestion.

[akpm@linux-foundation.org: fix comment]
Link: http://lkml.kernel.org/r/20191030031637.6025-1-haokexin@gmail.com
Fixes: b58d977432c8 ("dump_stack: serialize the output from dump_stack()")
Signed-off-by: Kevin Hao
Suggested-by: Linus Torvalds
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/dump_stack.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 5cff72f18c4a..33ffbf308853 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -106,7 +106,12 @@ retry:
 		was_locked = 1;
 	} else {
 		local_irq_restore(flags);
-		cpu_relax();
+		/*
+		 * Wait for the lock to release before jumping to
+		 * atomic_cmpxchg() in order to mitigate the thundering herd
+		 * problem.
+		 */
+		do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
 		goto retry;
 	}
-- 
cgit v1.2.3
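The wait-before-retry idea generalizes beyond the kernel's atomic_t. As a
hedged analogue in portable C11 atomics (not the kernel code: the names
demo_lock, demo_lock_acquire, and demo_lock_release are illustrative, and
sched_yield() stands in for cpu_relax()):

#include <stdatomic.h>
#include <sched.h>

static atomic_int demo_lock = -1;	/* -1 means unlocked, else holder ID */

/* Sketch: spin until the lock looks free before retrying the CAS. */
static void demo_lock_acquire(int self)
{
	for (;;) {
		int expected = -1;
		if (atomic_compare_exchange_strong(&demo_lock, &expected, self))
			return;	/* lock acquired */
		/*
		 * Wait for the holder to release before retrying the
		 * compare-and-swap, so the waiters do not all hammer the
		 * same variable at once (the thundering herd).
		 */
		while (atomic_load(&demo_lock) != -1)
			sched_yield();
	}
}

static void demo_lock_release(void)
{
	atomic_store(&demo_lock, -1);
}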