path: root/lib
author    Linus Torvalds <torvalds@linux-foundation.org>  2021-11-06 22:08:17 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-11-06 22:08:17 +0100
commit    512b7931ad0561ffe14265f9ff554a3c081b476b (patch)
tree      a94450d08468e094d2d92a495de4650faab09c1f /lib
parent    Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi (diff)
parent    mm/damon: remove return value from before_terminate callback (diff)
download  linux-512b7931ad0561ffe14265f9ff554a3c081b476b.tar.xz
          linux-512b7931ad0561ffe14265f9ff554a3c081b476b.zip
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
 "257 patches.

  Subsystems affected by this patch series: scripts, ocfs2, vfs, and mm
  (slab-generic, slab, slub, kconfig, dax, kasan, debug, pagecache, gup,
  swap, memcg, pagemap, mprotect, mremap, iomap, tracing, vmalloc,
  pagealloc, memory-failure, hugetlb, userfaultfd, vmscan, tools,
  memblock, oom-kill, hugetlbfs, migration, thp, readahead, nommu, ksm,
  vmstat, madvise, memory-hotplug, rmap, zsmalloc, highmem, zram,
  cleanups, kfence, and damon)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (257 commits)
  mm/damon: remove return value from before_terminate callback
  mm/damon: fix a few spelling mistakes in comments and a pr_debug message
  mm/damon: simplify stop mechanism
  Docs/admin-guide/mm/pagemap: wordsmith page flags descriptions
  Docs/admin-guide/mm/damon/start: simplify the content
  Docs/admin-guide/mm/damon/start: fix a wrong link
  Docs/admin-guide/mm/damon/start: fix wrong example commands
  mm/damon/dbgfs: add adaptive_targets list check before enable monitor_on
  mm/damon: remove unnecessary variable initialization
  Documentation/admin-guide/mm/damon: add a document for DAMON_RECLAIM
  mm/damon: introduce DAMON-based Reclamation (DAMON_RECLAIM)
  selftests/damon: support watermarks
  mm/damon/dbgfs: support watermarks
  mm/damon/schemes: activate schemes based on a watermarks mechanism
  tools/selftests/damon: update for regions prioritization of schemes
  mm/damon/dbgfs: support prioritization weights
  mm/damon/vaddr,paddr: support pageout prioritization
  mm/damon/schemes: prioritize regions within the quotas
  mm/damon/selftests: support schemes quotas
  mm/damon/dbgfs: support quotas of schemes
  ...
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug          2
-rw-r--r--   lib/Kconfig.kfence        26
-rw-r--r--   lib/bootconfig.c           2
-rw-r--r--   lib/cpumask.c              2
-rw-r--r--   lib/stackdepot.c          72
-rw-r--r--   lib/test_kasan.c          26
-rw-r--r--   lib/test_kasan_module.c    2
-rw-r--r--   lib/test_vmalloc.c         6
8 files changed, 85 insertions(+), 53 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6fdbf9613aec..9ef7ce18b4f5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -877,7 +877,7 @@ config DEBUG_MEMORY_INIT
config MEMORY_NOTIFIER_ERROR_INJECT
tristate "Memory hotplug notifier error injection module"
- depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
+ depends on MEMORY_HOTPLUG && NOTIFIER_ERROR_INJECTION
help
This option provides the ability to inject artificial errors to
memory hotplug notifier chain callbacks. It is controlled through
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index e641add33947..912f252a41fc 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -25,17 +25,6 @@ menuconfig KFENCE
if KFENCE
-config KFENCE_STATIC_KEYS
- bool "Use static keys to set up allocations"
- default y
- depends on JUMP_LABEL # To ensure performance, require jump labels
- help
- Use static keys (static branches) to set up KFENCE allocations. Using
- static keys is normally recommended, because it avoids a dynamic
- branch in the allocator's fast path. However, with very low sample
- intervals, or on systems that do not support jump labels, a dynamic
- branch may still be an acceptable performance trade-off.
-
config KFENCE_SAMPLE_INTERVAL
int "Default sample interval in milliseconds"
default 100
@@ -56,6 +45,21 @@ config KFENCE_NUM_OBJECTS
pages are required; with one containing the object and two adjacent
ones used as guard pages.
+config KFENCE_STATIC_KEYS
+ bool "Use static keys to set up allocations" if EXPERT
+ depends on JUMP_LABEL
+ help
+ Use static keys (static branches) to set up KFENCE allocations. This
+ option is only recommended when using very large sample intervals, or
+ performance has carefully been evaluated with this option.
+
+ Using static keys comes with trade-offs that need to be carefully
+ evaluated given target workloads and system architectures. Notably,
+ enabling and disabling static keys invoke IPI broadcasts, the latency
+ and impact of which is much harder to predict than a dynamic branch.
+
+ Say N if you are unsure.
+
config KFENCE_STRESS_TEST_FAULTS
int "Stress testing of fault handling and error reporting" if EXPERT
default 0
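
The KFENCE_STATIC_KEYS help text above contrasts a static-branch gate (patched code, but IPI broadcasts on every toggle) with a plain dynamic branch in the allocator fast path. The following is a minimal, illustrative sketch of that pattern only; it is not KFENCE code, and every demo_* identifier is made up.

/* Illustrative only: gating a fast path with a static key vs. a flag. */
#include <linux/jump_label.h>
#include <linux/atomic.h>

static DEFINE_STATIC_KEY_FALSE(demo_sample_key);  /* off by default, patched in place */
static atomic_t demo_sample_flag;                 /* ordinary variable alternative */

static __always_inline bool demo_should_sample_static(void)
{
        /* Compiles to straight-line code (a NOP) until the key is enabled. */
        return static_branch_unlikely(&demo_sample_key);
}

static __always_inline bool demo_should_sample_dynamic(void)
{
        /* A load plus a conditional branch on every call, but cheap to flip. */
        return atomic_read(&demo_sample_flag) != 0;
}

static void demo_sample_timer_fn(void)
{
        /*
         * Flipping the static key patches code and broadcasts IPIs - the cost
         * the help text warns about when sample intervals are very short.
         */
        static_branch_enable(&demo_sample_key);
}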
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 70e0d52ffd24..74f3201ab8e5 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -50,7 +50,7 @@ static inline void * __init xbc_alloc_mem(size_t size)
static inline void __init xbc_free_mem(void *addr, size_t size)
{
- memblock_free_ptr(addr, size);
+ memblock_free(addr, size);
}
#else /* !__KERNEL__ */
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c3c76b833384..a971a82d2f43 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL(free_cpumask_var);
*/
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
- memblock_free_early(__pa(mask), cpumask_size());
+ memblock_free(mask, cpumask_size());
}
#endif
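
Both one-line hunks above (bootconfig.c and cpumask.c) follow from the memblock interface rework merged through this series: memblock_free() now takes a virtual address plus size, so callers drop memblock_free_ptr() and the __pa() conversion used with the old physical-address API. A hedged sketch of the resulting allocate/free pairing, with invented demo_* names:

/* Sketch only; the demo_* names are invented, the memblock calls are real. */
#include <linux/memblock.h>
#include <linux/cache.h>

static void * __init demo_early_buffer_alloc(size_t size)
{
        /* Returns a zeroed buffer addressed by virtual pointer. */
        return memblock_alloc(size, SMP_CACHE_BYTES);
}

static void __init demo_early_buffer_free(void *buf, size_t size)
{
        /* Pairs directly with memblock_alloc(); no __pa() needed. */
        memblock_free(buf, size);
}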
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 0a2e417f83cb..09485dc5bd12 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -20,7 +20,6 @@
*/
#include <linux/gfp.h>
-#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -102,8 +101,8 @@ static bool init_stack_slab(void **prealloc)
}
/* Allocation of a new stack in raw storage */
-static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
- u32 hash, void **prealloc, gfp_t alloc_flags)
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
struct stack_record *stack;
size_t required_size = struct_size(stack, entries, size);
@@ -248,17 +247,28 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
- * stack_depot_save - Save a stack trace from an array
+ * __stack_depot_save - Save a stack trace from an array
*
* @entries: Pointer to storage array
* @nr_entries: Size of the storage array
* @alloc_flags: Allocation gfp flags
+ * @can_alloc: Allocate stack slabs (increased chance of failure if false)
+ *
+ * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
+ * %true, is allowed to replenish the stack slab pool in case no space is left
+ * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
+ * any allocations and will fail if no space is left to store the stack trace.
*
- * Return: The handle of the stack struct stored in depot
+ * Context: Any context, but setting @can_alloc to %false is required if
+ * alloc_pages() cannot be used from the current context. Currently
+ * this is the case from contexts where neither %GFP_ATOMIC nor
+ * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
+ *
+ * Return: The handle of the stack struct stored in depot, 0 on failure.
*/
-depot_stack_handle_t stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t alloc_flags)
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags, bool can_alloc)
{
struct stack_record *found = NULL, **bucket;
depot_stack_handle_t retval = 0;
@@ -291,7 +301,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
* The smp_load_acquire() here pairs with smp_store_release() to
* |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
*/
- if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+ if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -309,9 +319,8 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
- struct stack_record *new =
- depot_alloc_stack(entries, nr_entries,
- hash, &prealloc, alloc_flags);
+ struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+
if (new) {
new->next = *bucket;
/*
@@ -340,27 +349,24 @@ exit:
fast_exit:
return retval;
}
-EXPORT_SYMBOL_GPL(stack_depot_save);
-
-static inline int in_irqentry_text(unsigned long ptr)
-{
- return (ptr >= (unsigned long)&__irqentry_text_start &&
- ptr < (unsigned long)&__irqentry_text_end) ||
- (ptr >= (unsigned long)&__softirqentry_text_start &&
- ptr < (unsigned long)&__softirqentry_text_end);
-}
+EXPORT_SYMBOL_GPL(__stack_depot_save);
-unsigned int filter_irq_stacks(unsigned long *entries,
- unsigned int nr_entries)
+/**
+ * stack_depot_save - Save a stack trace from an array
+ *
+ * @entries: Pointer to storage array
+ * @nr_entries: Size of the storage array
+ * @alloc_flags: Allocation gfp flags
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed.
+ * See __stack_depot_save() for more details.
+ *
+ * Return: The handle of the stack struct stored in depot, 0 on failure.
+ */
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags)
{
- unsigned int i;
-
- for (i = 0; i < nr_entries; i++) {
- if (in_irqentry_text(entries[i])) {
- /* Include the irqentry function into the stack. */
- return i + 1;
- }
- }
- return nr_entries;
+ return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
-EXPORT_SYMBOL_GPL(filter_irq_stacks);
+EXPORT_SYMBOL_GPL(stack_depot_save);
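
The kernel-doc above splits the depot entry point: __stack_depot_save() takes an explicit can_alloc argument, and stack_depot_save() keeps the old behaviour by passing true. A hedged usage sketch based only on that documentation; the surrounding demo_* functions are invented.

#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/gfp.h>

static depot_stack_handle_t demo_record_stack(bool may_alloc)
{
        unsigned long entries[16];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

        /*
         * With may_alloc == false the depot never calls alloc_pages(), so the
         * call is usable where neither GFP_ATOMIC nor GFP_NOWAIT works (NMI,
         * raw_spin_lock), at the cost of returning 0 when the preallocated
         * slabs are exhausted.
         */
        return __stack_depot_save(entries, nr_entries, GFP_NOWAIT, may_alloc);
}

static void demo_print_stack(depot_stack_handle_t handle)
{
        unsigned long *saved;
        unsigned int n = stack_depot_fetch(handle, &saved);

        stack_trace_print(saved, n, 0);
}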
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ebed755ebf34..67ed689a0b1b 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -440,6 +440,7 @@ static void kmalloc_oob_memset_2(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
kfree(ptr);
}
@@ -452,6 +453,7 @@ static void kmalloc_oob_memset_4(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
kfree(ptr);
}
@@ -464,6 +466,7 @@ static void kmalloc_oob_memset_8(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
kfree(ptr);
}
@@ -476,6 +479,7 @@ static void kmalloc_oob_memset_16(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
kfree(ptr);
}
@@ -488,16 +492,17 @@ static void kmalloc_oob_in_memset(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test,
memset(ptr, 0, size + KASAN_GRANULE_SIZE));
kfree(ptr);
}
-static void kmalloc_memmove_invalid_size(struct kunit *test)
+static void kmalloc_memmove_negative_size(struct kunit *test)
{
char *ptr;
size_t size = 64;
- volatile size_t invalid_size = -2;
+ size_t invalid_size = -2;
/*
* Hardware tag-based mode doesn't check memmove for negative size.
@@ -510,6 +515,22 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset((char *)ptr, 0, 64);
+ OPTIMIZER_HIDE_VAR(invalid_size);
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ memmove((char *)ptr, (char *)ptr + 4, invalid_size));
+ kfree(ptr);
+}
+
+static void kmalloc_memmove_invalid_size(struct kunit *test)
+{
+ char *ptr;
+ size_t size = 64;
+ volatile size_t invalid_size = size;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ memset((char *)ptr, 0, 64);
KUNIT_EXPECT_KASAN_FAIL(test,
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
@@ -1129,6 +1150,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_memset_4),
KUNIT_CASE(kmalloc_oob_memset_8),
KUNIT_CASE(kmalloc_oob_memset_16),
+ KUNIT_CASE(kmalloc_memmove_negative_size),
KUNIT_CASE(kmalloc_memmove_invalid_size),
KUNIT_CASE(kmalloc_uaf),
KUNIT_CASE(kmalloc_uaf_memset),
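
The repeated OPTIMIZER_HIDE_VAR(size) additions in test_kasan.c (and in test_kasan_module.c below) keep the compiler from treating the size as a known constant and simplifying or diagnosing the deliberate out-of-bounds accesses before they can reach KASAN at runtime. The following is a simplified sketch of the idea, not a copy of the <linux/compiler.h> macro, and DEMO_HIDE_VAR is a made-up name:

/* Simplified rendering of the technique for illustration only. */
#include <linux/string.h>

#define DEMO_HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var))

static void demo_oob_memset(char *ptr, size_t size)
{
        /*
         * With `size` visible as a constant, the compiler can see that the
         * memset below writes past the object and may fold it away or warn.
         * Laundering the value through an empty asm makes it opaque, so the
         * out-of-bounds write actually happens and KASAN reports it.
         */
        DEMO_HIDE_VAR(size);
        memset(ptr + size - 1, 0, 2);
}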
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index 7ebf433edef3..b112cbc835e9 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -35,6 +35,8 @@ static noinline void __init copy_user_test(void)
return;
}
+ OPTIMIZER_HIDE_VAR(size);
+
pr_info("out-of-bounds in copy_from_user()\n");
unused = copy_from_user(kmem, usermem, size + 1);
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index e14993bc84d2..cf41fd6df42a 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -393,7 +393,7 @@ static struct test_driver {
static void shuffle_array(int *arr, int n)
{
unsigned int rnd;
- int i, j, x;
+ int i, j;
for (i = n - 1; i > 0; i--) {
get_random_bytes(&rnd, sizeof(rnd));
@@ -402,9 +402,7 @@ static void shuffle_array(int *arr, int n)
j = rnd % i;
/* Swap indexes. */
- x = arr[i];
- arr[i] = arr[j];
- arr[j] = x;
+ swap(arr[i], arr[j]);
}
}
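
The last hunk replaces the open-coded three-line exchange with the kernel's swap() helper; the shuffle itself is unchanged. For reference, swap() expands to roughly the following temporary-based exchange (a simplified sketch, not the exact kernel definition):

/* Simplified expansion for illustration; demo_swap is a made-up name. */
#define demo_swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)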