author      Linus Torvalds <torvalds@linux-foundation.org>  2021-04-30 23:38:01 +0200
committer   Linus Torvalds <torvalds@linux-foundation.org>  2021-04-30 23:38:01 +0200
commit      d42f323a7df0b298c07313db00b44b78555ca8e6
tree        e9ac2b9f20fed683ff78b294c3792acb157787e5 /lib
parent      Merge tag 'pinctrl-v5.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/...
parent      mm/memory-failure: unnecessary amount of unmapping
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
"A few misc subsystems and some of MM.
175 patches.
Subsystems affected by this patch series: ia64, kbuild, scripts, sh,
ocfs2, kfifo, vfs, kernel/watchdog, and mm (slab-generic, slub,
kmemleak, debug, pagecache, msync, gup, memremap, memcg, pagemap,
mremap, dma, sparsemem, vmalloc, documentation, kasan, initialization,
pagealloc, and memory-failure)"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (175 commits)
mm/memory-failure: unnecessary amount of unmapping
mm/mmzone.h: fix existing kernel-doc comments and link them to core-api
mm: page_alloc: ignore init_on_free=1 for debug_pagealloc=1
net: page_pool: use alloc_pages_bulk in refill code path
net: page_pool: refactor dma_map into own function page_pool_dma_map
SUNRPC: refresh rq_pages using a bulk page allocator
SUNRPC: set rq_page_end differently
mm/page_alloc: inline __rmqueue_pcplist
mm/page_alloc: optimize code layout for __alloc_pages_bulk
mm/page_alloc: add an array-based interface to the bulk page allocator
mm/page_alloc: add a bulk page allocator
mm/page_alloc: rename alloced to allocated
mm/page_alloc: duplicate include linux/vmalloc.h
mm, page_alloc: avoid page_to_pfn() in move_freepages()
mm/Kconfig: remove default DISCONTIGMEM_MANUAL
mm: page_alloc: dump migrate-failed pages
mm/mempolicy: fix mpol_misplaced kernel-doc
mm/mempolicy: rewrite alloc_pages_vma documentation
mm/mempolicy: rewrite alloc_pages documentation
mm/mempolicy: rename alloc_pages_current to alloc_pages
...
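Several page_alloc entries in the list above introduce the bulk page allocator that the page_pool and SUNRPC patches then consume. The sketch below is illustrative rather than lifted from the series: refill_sketch() and its parameters are invented for this note, while alloc_pages_bulk_array() is the array-based entry point this series adds (it fills only the NULL slots of the array with order-0 pages and returns how many slots are populated afterwards); treat the exact prototype as indicative of what was merged.

	#include <linux/gfp.h>

	/*
	 * Illustrative caller of the array-based bulk interface. A single
	 * call replaces a loop of per-page alloc_page() round trips, which
	 * is the point of the page_pool and SUNRPC conversions above. A
	 * short return is not an error as such; the caller decides whether
	 * to retry or make do with what it got.
	 */
	static unsigned long refill_sketch(struct page **pages, unsigned long want)
	{
		return alloc_pages_bulk_array(GFP_KERNEL, want, pages);
	}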
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug    |   9
-rw-r--r--   lib/test_kasan.c     |  59
-rw-r--r--   lib/test_vmalloc.c   | 128
3 files changed, 79 insertions, 117 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b38bbb5bb00e..678c13967580 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2573,11 +2573,18 @@ config TEST_FPU
 
 endif # RUNTIME_TESTING_MENU
 
+config ARCH_USE_MEMTEST
+	bool
+	help
+	  An architecture should select this when it uses early_memtest()
+	  during boot process.
+
 config MEMTEST
 	bool "Memtest"
+	depends on ARCH_USE_MEMTEST
 	help
 	  This option adds a kernel parameter 'memtest', which allows memtest
-	  to be set.
+	  to be set and executed.
 	        memtest=0, mean disabled; -- default
 	        memtest=1, mean do 1 test pattern;
 	        ...
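With this gate, memtest=N only has an effect on architectures that actually wire up early_memtest(). A minimal sketch of the arch side follows: arch_bootmem_init() and its parameters are hypothetical stand-ins for the architecture's early boot path, which would additionally carry "select ARCH_USE_MEMTEST" in its Kconfig entry; early_memtest() itself is the real hook, declared in <linux/memblock.h> and compiled to a no-op stub when CONFIG_MEMTEST=n.

	#include <linux/init.h>
	#include <linux/memblock.h>	/* early_memtest() */

	/* arch_bootmem_init() is an illustrative name, not a real kernel hook. */
	void __init arch_bootmem_init(phys_addr_t min_pa, phys_addr_t max_pa)
	{
		/*
		 * Walk the free memblock ranges in [min_pa, max_pa), write and
		 * verify the pattern(s) requested via the memtest= parameter,
		 * and reserve any pages that fail.
		 */
		early_memtest(min_pa, max_pa);
	}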
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 785e724ce0d8..dc05cfc2d12f 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -54,6 +54,10 @@ static int kasan_test_init(struct kunit *test)
 
 	multishot = kasan_save_enable_multi_shot();
 	kasan_set_tagging_report_once(false);
+	fail_data.report_found = false;
+	fail_data.report_expected = false;
+	kunit_add_named_resource(test, NULL, NULL, &resource,
+				 "kasan_data", &fail_data);
 	return 0;
 }
 
@@ -61,6 +65,7 @@ static void kasan_test_exit(struct kunit *test)
 {
 	kasan_set_tagging_report_once(true);
 	kasan_restore_multi_shot(multishot);
+	KUNIT_EXPECT_FALSE(test, fail_data.report_found);
 }
 
 /**
@@ -78,33 +83,31 @@ static void kasan_test_exit(struct kunit *test)
  * fields, it can reorder or optimize away the accesses to those fields.
  * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
  * expression to prevent that.
+ *
+ * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
+ * false. This allows detecting KASAN reports that happen outside of the checks
+ * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
+ * and in kasan_test_exit.
  */
-#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
-	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&			\
-	    !kasan_async_mode_enabled())			\
-		migrate_disable();				\
-	WRITE_ONCE(fail_data.report_expected, true);		\
-	WRITE_ONCE(fail_data.report_found, false);		\
-	kunit_add_named_resource(test,				\
-				NULL,				\
-				NULL,				\
-				&resource,			\
-				"kasan_data", &fail_data);	\
-	barrier();						\
-	expression;						\
-	barrier();						\
-	if (kasan_async_mode_enabled())				\
-		kasan_force_async_fault();			\
-	barrier();						\
-	KUNIT_EXPECT_EQ(test,					\
-			READ_ONCE(fail_data.report_expected),	\
-			READ_ONCE(fail_data.report_found));	\
-	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&			\
-	    !kasan_async_mode_enabled()) {			\
-		if (READ_ONCE(fail_data.report_found))		\
-			kasan_enable_tagging_sync();		\
-		migrate_enable();				\
-	}							\
+#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
+	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
+	    !kasan_async_mode_enabled())				\
+		migrate_disable();					\
+	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));	\
+	WRITE_ONCE(fail_data.report_expected, true);			\
+	barrier();							\
+	expression;							\
+	barrier();							\
+	KUNIT_EXPECT_EQ(test,						\
+			READ_ONCE(fail_data.report_expected),		\
+			READ_ONCE(fail_data.report_found));		\
+	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
+		if (READ_ONCE(fail_data.report_found))			\
+			kasan_enable_tagging_sync();			\
+		migrate_enable();					\
+	}								\
+	WRITE_ONCE(fail_data.report_found, false);			\
+	WRITE_ONCE(fail_data.report_expected, false);			\
 } while (0)
 
 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
@@ -1049,14 +1052,14 @@ static void match_all_mem_tag(struct kunit *test)
 			continue;
 
 		/* Mark the first memory granule with the chosen memory tag. */
-		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);
+		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
 
 		/* This access must cause a KASAN report. */
 		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
 	}
 
 	/* Recover the memory tag and free. */
-	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
+	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
 
 	kfree(ptr);
 }
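The macro's contract with its callers is unchanged: the expression handed to KUNIT_EXPECT_KASAN_FAIL must trigger exactly one KASAN report, and with the change above any report fired outside the checked expression is now caught, either by the KUNIT_EXPECT_FALSE() at the start of the next check or by the one in kasan_test_exit(). A minimal sketch of a caller, modeled on the existing cases in lib/test_kasan.c (the function name here is hypothetical):

	static void kmalloc_oob_sketch(struct kunit *test)
	{
		char *ptr;
		size_t size = 123;

		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

		/* One-byte out-of-bounds write: must produce a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

		kfree(ptr);
	}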
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 5cf2fe9aab9e..01e9543de566 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -23,8 +23,8 @@
 	module_param(name, type, 0444);				\
 	MODULE_PARM_DESC(name, msg)				\
 
-__param(bool, single_cpu_test, false,
-	"Use single first online CPU to run tests");
+__param(int, nr_threads, 0,
+	"Number of workers to perform tests(min: 1 max: USHRT_MAX)");
 
 __param(bool, sequential_test_order, false,
 	"Use sequential stress tests order");
@@ -47,19 +47,10 @@ __param(int, run_test_mask, INT_MAX,
 		"\t\tid: 128, name: pcpu_alloc_test\n"
 		"\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n"
 		"\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n"
-		"\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n"
-		"\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n"
 		/* Add a new test case description here. */
 );
 
 /*
- * Depends on single_cpu_test parameter. If it is true, then
- * use first online CPU to trigger a test on, otherwise go with
- * all online CPUs.
- */
-static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
-
-/*
  * Read write semaphore for synchronization of setup
  * phase that is done in main thread and workers.
  */
@@ -363,42 +354,6 @@ kvfree_rcu_2_arg_vmalloc_test(void)
 	return 0;
 }
 
-static int
-kvfree_rcu_1_arg_slab_test(void)
-{
-	struct test_kvfree_rcu *p;
-	int i;
-
-	for (i = 0; i < test_loop_count; i++) {
-		p = kmalloc(sizeof(*p), GFP_KERNEL);
-		if (!p)
-			return -1;
-
-		p->array[0] = 'a';
-		kvfree_rcu(p);
-	}
-
-	return 0;
-}
-
-static int
-kvfree_rcu_2_arg_slab_test(void)
-{
-	struct test_kvfree_rcu *p;
-	int i;
-
-	for (i = 0; i < test_loop_count; i++) {
-		p = kmalloc(sizeof(*p), GFP_KERNEL);
-		if (!p)
-			return -1;
-
-		p->array[0] = 'a';
-		kvfree_rcu(p, rcu);
-	}
-
-	return 0;
-}
-
 struct test_case_desc {
 	const char *test_name;
 	int (*test_func)(void);
@@ -415,8 +370,6 @@ static struct test_case_desc test_case_array[] = {
 	{ "pcpu_alloc_test", pcpu_alloc_test },
 	{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
 	{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
-	{ "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test },
-	{ "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test },
 	/* Add a new test case here. */
 };
 
@@ -426,16 +379,13 @@ struct test_case_data {
 	u64 time;
 };
 
-/* Split it to get rid of: WARNING: line over 80 characters */
-static struct test_case_data
-	per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
-
 static struct test_driver {
 	struct task_struct *task;
+	struct test_case_data data[ARRAY_SIZE(test_case_array)];
+
 	unsigned long start;
 	unsigned long stop;
-	int cpu;
-} per_cpu_test_driver[NR_CPUS];
+} *tdriver;
 
 static void shuffle_array(int *arr, int n)
 {
@@ -463,9 +413,6 @@ static int test_func(void *private)
 	ktime_t kt;
 	u64 delta;
 
-	if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
-		pr_err("Failed to set affinity to %d CPU\n", t->cpu);
-
 	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
 		random_array[i] = i;
 
@@ -490,9 +437,9 @@ static int test_func(void *private)
 		kt = ktime_get();
 		for (j = 0; j < test_repeat_count; j++) {
 			if (!test_case_array[index].test_func())
-				per_cpu_test_data[t->cpu][index].test_passed++;
+				t->data[index].test_passed++;
 			else
-				per_cpu_test_data[t->cpu][index].test_failed++;
+				t->data[index].test_failed++;
 		}
 
 		/*
@@ -501,7 +448,7 @@ static int test_func(void *private)
 		delta = (u64) ktime_us_delta(ktime_get(), kt);
 		do_div(delta, (u32) test_repeat_count);
 
-		per_cpu_test_data[t->cpu][index].time = delta;
+		t->data[index].time = delta;
 	}
 
 	t->stop = get_cycles();
@@ -517,53 +464,56 @@ static int test_func(void *private)
 	return 0;
 }
 
-static void
+static int
 init_test_configurtion(void)
 {
 	/*
-	 * Reset all data of all CPUs.
+	 * A maximum number of workers is defined as hard-coded
+	 * value and set to USHRT_MAX. We add such gap just in
+	 * case and for potential heavy stressing.
 	 */
-	memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+	nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
 
-	if (single_cpu_test)
-		cpumask_set_cpu(cpumask_first(cpu_online_mask),
-			&cpus_run_test_mask);
-	else
-		cpumask_and(&cpus_run_test_mask, cpu_online_mask,
-			cpu_online_mask);
+	/* Allocate the space for test instances. */
+	tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);
+	if (tdriver == NULL)
+		return -1;
 
 	if (test_repeat_count <= 0)
 		test_repeat_count = 1;
 
 	if (test_loop_count <= 0)
 		test_loop_count = 1;
+
+	return 0;
 }
 
 static void do_concurrent_test(void)
 {
-	int cpu, ret;
+	int i, ret;
 
 	/*
	 * Set some basic configurations plus sanity check.
 	 */
-	init_test_configurtion();
+	ret = init_test_configurtion();
+	if (ret < 0)
+		return;
 
 	/*
 	 * Put on hold all workers.
 	 */
 	down_write(&prepare_for_test_rwsem);
 
-	for_each_cpu(cpu, &cpus_run_test_mask) {
-		struct test_driver *t = &per_cpu_test_driver[cpu];
+	for (i = 0; i < nr_threads; i++) {
+		struct test_driver *t = &tdriver[i];
 
-		t->cpu = cpu;
-		t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
+		t->task = kthread_run(test_func, t, "vmalloc_test/%d", i);
 		if (!IS_ERR(t->task))
 			/* Success. */
 			atomic_inc(&test_n_undone);
 		else
-			pr_err("Failed to start kthread for %d CPU\n", cpu);
+			pr_err("Failed to start %d kthread\n", i);
 	}
 
 	/*
@@ -581,29 +531,31 @@ static void do_concurrent_test(void)
 		ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
 	} while (!ret);
 
-	for_each_cpu(cpu, &cpus_run_test_mask) {
-		struct test_driver *t = &per_cpu_test_driver[cpu];
-		int i;
+	for (i = 0; i < nr_threads; i++) {
+		struct test_driver *t = &tdriver[i];
+		int j;
 
 		if (!IS_ERR(t->task))
 			kthread_stop(t->task);
 
-		for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
-			if (!((run_test_mask & (1 << i)) >> i))
+		for (j = 0; j < ARRAY_SIZE(test_case_array); j++) {
+			if (!((run_test_mask & (1 << j)) >> j))
 				continue;
 
 			pr_info(
 				"Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
-				test_case_array[i].test_name,
-				per_cpu_test_data[cpu][i].test_passed,
-				per_cpu_test_data[cpu][i].test_failed,
+				test_case_array[j].test_name,
+				t->data[j].test_passed,
+				t->data[j].test_failed,
 				test_repeat_count, test_loop_count,
-				per_cpu_test_data[cpu][i].time);
+				t->data[j].time);
 		}
 
-		pr_info("All test took CPU%d=%lu cycles\n",
-			cpu, t->stop - t->start);
+		pr_info("All test took worker%d=%lu cycles\n",
+			i, t->stop - t->start);
 	}
+
+	kvfree(tdriver);
 }
 
 static int vmalloc_test_init(void)
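With the per-CPU model gone, a run is now shaped by the new nr_threads module parameter instead of single_cpu_test, e.g. something like "modprobe test_vmalloc nr_threads=4 run_test_mask=7" (parameter values illustrative); the workers are plain kthreads with no CPU affinity. For completeness, a hypothetical sketch of what the "/* Add a new test case here. */" marker anticipates; my_new_test and its run_test_mask bit do not exist in the tree:

	static int
	my_new_test(void)
	{
		/*
		 * A test function returns 0 on success and non-zero on
		 * failure; it is invoked test_repeat_count times by every
		 * worker whose bit is selected via run_test_mask.
		 */
		void *ptr = vmalloc(PAGE_SIZE);

		if (!ptr)
			return -1;

		vfree(ptr);
		return 0;
	}

It would be registered by appending { "my_new_test", my_new_test }, to test_case_array, which also assigns it the next mask id for run_test_mask.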