Diffstat (limited to 'lib')
30 files changed, 630 insertions, 1450 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5429e6f170f3..2164f066e7b6 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1303,7 +1303,7 @@ config PROVE_LOCKING select DEBUG_SPINLOCK select DEBUG_MUTEXES if !PREEMPT_RT select DEBUG_RT_MUTEXES if RT_MUTEXES - select DEBUG_RWSEMS + select DEBUG_RWSEMS if !PREEMPT_RT select DEBUG_WW_MUTEX_SLOWPATH select DEBUG_LOCK_ALLOC select PREEMPT_COUNT if !ARCH_NO_PREEMPT @@ -1426,7 +1426,7 @@ config DEBUG_WW_MUTEX_SLOWPATH config DEBUG_RWSEMS bool "RW Semaphore debugging: basic checks" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !PREEMPT_RT help This debugging feature allows mismatched rw semaphore locks and unlocks to be detected and reported. @@ -2235,6 +2235,7 @@ config TEST_DIV64 config TEST_IOV_ITER tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS depends on KUNIT + depends on MMU default KUNIT_ALL_TESTS help Enable this to turn on testing of the operation of the I/O iterator @@ -2861,28 +2862,6 @@ config TEST_MEMCAT_P If unsure, say N. -config TEST_LIVEPATCH - tristate "Test livepatching" - default n - depends on DYNAMIC_DEBUG - depends on LIVEPATCH - depends on m - help - Test kernel livepatching features for correctness. The tests will - load test modules that will be livepatched in various scenarios. - - To run all the livepatching tests: - - make -C tools/testing/selftests TARGETS=livepatch run_tests - - Alternatively, individual tests may be invoked: - - tools/testing/selftests/livepatch/test-callbacks.sh - tools/testing/selftests/livepatch/test-livepatch.sh - tools/testing/selftests/livepatch/test-shadow-vars.sh - - If unsure, say N. - config TEST_OBJAGG tristate "Perform selftest on object aggreration manager" default n diff --git a/lib/Makefile b/lib/Makefile index 97c42e38046f..363852afa200 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -135,8 +135,6 @@ endif obj-$(CONFIG_TEST_FPU) += test_fpu.o CFLAGS_test_fpu.o += $(FPU_CFLAGS) -obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ - # Some KUnit files (hooks.o) need to be built-in even when KUnit is a module, # so we can't just use obj-$(CONFIG_KUNIT). 
ifdef CONFIG_KUNIT diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c index 225bb7701460..bf70850035c7 100644 --- a/lib/checksum_kunit.c +++ b/lib/checksum_kunit.c @@ -215,7 +215,7 @@ static const u32 init_sums_no_overflow[] = { 0xffff0000, 0xfffffffb, }; -static const __sum16 expected_csum_ipv6_magic[] = { +static const u16 expected_csum_ipv6_magic[] = { 0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f, 0x1034, 0x8422, 0x6fc0, 0xd2f6, 0xbeb5, 0x9d3, 0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e, 0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d, @@ -241,7 +241,7 @@ static const __sum16 expected_csum_ipv6_magic[] = { 0x3845, 0x1014 }; -static const __sum16 expected_fast_csum[] = { +static const u16 expected_fast_csum[] = { 0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e, 0xe902, 0xa5e9, 0x87a5, 0x7187, 0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a, 0xedbe, 0xabee, 0x6aac, 0xe6b, 0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a, @@ -577,7 +577,8 @@ static void test_csum_no_carry_inputs(struct kunit *test) static void test_ip_fast_csum(struct kunit *test) { - __sum16 csum_result, expected; + __sum16 csum_result; + u16 expected; for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) { for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) { @@ -586,7 +587,7 @@ static void test_ip_fast_csum(struct kunit *test) expected_fast_csum[(len - IPv4_MIN_WORDS) * NUM_IP_FAST_CSUM_TESTS + index]; - CHECK_EQ(expected, csum_result); + CHECK_EQ(to_sum16(expected), csum_result); } } } @@ -598,7 +599,7 @@ static void test_csum_ipv6_magic(struct kunit *test) const struct in6_addr *daddr; unsigned int len; unsigned char proto; - unsigned int csum; + __wsum csum; const int daddr_offset = sizeof(struct in6_addr); const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr); @@ -611,10 +612,10 @@ static void test_csum_ipv6_magic(struct kunit *test) saddr = (const struct in6_addr *)(random_buf + i); daddr = (const struct in6_addr *)(random_buf + i + daddr_offset); - len = *(unsigned int *)(random_buf + i + len_offset); + len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset)); proto = *(random_buf + i + proto_offset); - csum = *(unsigned int *)(random_buf + i + csum_offset); - CHECK_EQ(expected_csum_ipv6_magic[i], + csum = *(__wsum *)(random_buf + i + csum_offset); + CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]), csum_ipv6_magic(saddr, daddr, len, proto, csum)); } #endif /* !CONFIG_NET */ diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c index d4572dbc9145..705b82736be0 100644 --- a/lib/cmdline_kunit.c +++ b/lib/cmdline_kunit.c @@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in, n, e[0], r[0]); p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0])); - KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r); + KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r); } static void cmdline_test_range(struct kunit *test) diff --git a/lib/iov_iter.c b/lib/iov_iter.c index e0aa6b440ca5..4a6a9f419bd7 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -166,7 +166,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter) { .iter_type = ITER_IOVEC, - .copy_mc = false, .nofault = false, .data_source = direction, .__iov = iov, @@ -245,26 +244,8 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ static __always_inline -size_t memcpy_from_iter_mc(void *iter_from, size_t progress, - 
size_t len, void *to, void *priv2) -{ - return copy_mc_to_kernel(to + progress, iter_from, len); -} - -static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i) -{ - if (unlikely(i->count < bytes)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc); -} - -static __always_inline size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_copy_mc(i))) - return __copy_from_iter_mc(addr, bytes, i); return iterate_and_advance(i, bytes, addr, copy_from_user_iter, memcpy_from_iter); } @@ -633,7 +614,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_KVEC, - .copy_mc = false, .data_source = direction, .kvec = kvec, .nr_segs = nr_segs, @@ -650,7 +630,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_BVEC, - .copy_mc = false, .data_source = direction, .bvec = bvec, .nr_segs = nr_segs, @@ -679,7 +658,6 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, BUG_ON(direction & ~1); *i = (struct iov_iter) { .iter_type = ITER_XARRAY, - .copy_mc = false, .data_source = direction, .xarray = xarray, .xarray_start = start, @@ -703,7 +681,6 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) BUG_ON(direction != READ); *i = (struct iov_iter){ .iter_type = ITER_DISCARD, - .copy_mc = false, .data_source = false, .count = count, .iov_offset = 0 @@ -714,12 +691,11 @@ EXPORT_SYMBOL(iov_iter_discard); static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask) { + const struct iovec *iov = iter_iov(i); size_t size = i->count; size_t skip = i->iov_offset; - unsigned k; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - const struct iovec *iov = iter_iov(i) + k; + do { size_t len = iov->iov_len - skip; if (len > size) @@ -729,34 +705,36 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, if ((unsigned long)(iov->iov_base + skip) & addr_mask) return false; + iov++; size -= len; - if (!size) - break; - } + skip = 0; + } while (size); + return true; } static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask) { - size_t size = i->count; + const struct bio_vec *bvec = i->bvec; unsigned skip = i->iov_offset; - unsigned k; + size_t size = i->count; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - size_t len = i->bvec[k].bv_len - skip; + do { + size_t len = bvec->bv_len; if (len > size) len = size; if (len & len_mask) return false; - if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) + if ((unsigned long)(bvec->bv_offset + skip) & addr_mask) return false; + bvec++; size -= len; - if (!size) - break; - } + skip = 0; + } while (size); + return true; } @@ -800,13 +778,12 @@ EXPORT_SYMBOL_GPL(iov_iter_is_aligned); static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) { + const struct iovec *iov = iter_iov(i); unsigned long res = 0; size_t size = i->count; size_t skip = i->iov_offset; - unsigned k; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - const struct iovec *iov = iter_iov(i) + k; + do { size_t len = iov->iov_len - skip; if (len) { res |= (unsigned long)iov->iov_base + skip; @@ -814,30 +791,31 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) len = size; res |= len; size -= len; - if (!size) - 
break; } - } + iov++; + skip = 0; + } while (size); return res; } static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) { + const struct bio_vec *bvec = i->bvec; unsigned res = 0; size_t size = i->count; unsigned skip = i->iov_offset; - unsigned k; - for (k = 0; k < i->nr_segs; k++, skip = 0) { - size_t len = i->bvec[k].bv_len - skip; - res |= (unsigned long)i->bvec[k].bv_offset + skip; + do { + size_t len = bvec->bv_len - skip; + res |= (unsigned long)bvec->bv_offset + skip; if (len > size) len = size; res |= len; + bvec++; size -= len; - if (!size) - break; - } + skip = 0; + } while (size); + return res; } @@ -1166,11 +1144,12 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) EXPORT_SYMBOL(dup_iter); static __noclone int copy_compat_iovec_from_user(struct iovec *iov, - const struct iovec __user *uvec, unsigned long nr_segs) + const struct iovec __user *uvec, u32 nr_segs) { const struct compat_iovec __user *uiov = (const struct compat_iovec __user *)uvec; - int ret = -EFAULT, i; + int ret = -EFAULT; + u32 i; if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) return -EFAULT; diff --git a/lib/kobject.c b/lib/kobject.c index 59dbcbdb1c91..72fa20f405f1 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -74,10 +74,12 @@ static int create_dir(struct kobject *kobj) if (error) return error; - error = sysfs_create_groups(kobj, ktype->default_groups); - if (error) { - sysfs_remove_dir(kobj); - return error; + if (ktype) { + error = sysfs_create_groups(kobj, ktype->default_groups); + if (error) { + sysfs_remove_dir(kobj); + return error; + } } /* @@ -589,7 +591,8 @@ static void __kobject_del(struct kobject *kobj) sd = kobj->sd; ktype = get_ktype(kobj); - sysfs_remove_groups(kobj, ktype->default_groups); + if (ktype) + sysfs_remove_groups(kobj, ktype->default_groups); /* send "remove" if the caller did not do it but sent "add" */ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) { @@ -666,6 +669,10 @@ static void kobject_cleanup(struct kobject *kobj) pr_debug("'%s' (%p): %s, parent %p\n", kobject_name(kobj), kobj, __func__, kobj->parent); + if (t && !t->release) + pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", + kobject_name(kobj), kobj); + /* remove from sysfs if the caller did not do it */ if (kobj->state_in_sysfs) { pr_debug("'%s' (%p): auto cleanup kobject_del\n", @@ -676,13 +683,10 @@ static void kobject_cleanup(struct kobject *kobj) parent = NULL; } - if (t->release) { + if (t && t->release) { pr_debug("'%s' (%p): calling ktype release\n", kobject_name(kobj), kobj); t->release(kobj); - } else { - pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", - kobject_name(kobj), kobj); } /* free name if we allocated it */ @@ -1056,7 +1060,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa { const struct kobj_ns_type_operations *ops = NULL; - if (parent && parent->ktype->child_ns_type) + if (parent && parent->ktype && parent->ktype->child_ns_type) ops = parent->ktype->child_ns_type(parent); return ops; diff --git a/lib/kunit/device-impl.h b/lib/kunit/device-impl.h index 54bd55836405..5fcd48ff0f36 100644 --- a/lib/kunit/device-impl.h +++ b/lib/kunit/device-impl.h @@ -13,5 +13,7 @@ // For internal use only -- registers the kunit_bus. int kunit_bus_init(void); +// For internal use only -- unregisters the kunit_bus. 
+void kunit_bus_shutdown(void); #endif //_KUNIT_DEVICE_IMPL_H diff --git a/lib/kunit/device.c b/lib/kunit/device.c index f5371287b375..abc603730b8e 100644 --- a/lib/kunit/device.c +++ b/lib/kunit/device.c @@ -10,6 +10,7 @@ */ #include <linux/device.h> +#include <linux/dma-mapping.h> #include <kunit/test.h> #include <kunit/device.h> @@ -35,7 +36,7 @@ struct kunit_device { #define to_kunit_device(d) container_of_const(d, struct kunit_device, dev) -static struct bus_type kunit_bus_type = { +static const struct bus_type kunit_bus_type = { .name = "kunit", }; @@ -45,8 +46,8 @@ int kunit_bus_init(void) int error; kunit_bus_device = root_device_register("kunit"); - if (!kunit_bus_device) - return -ENOMEM; + if (IS_ERR(kunit_bus_device)) + return PTR_ERR(kunit_bus_device); error = bus_register(&kunit_bus_type); if (error) @@ -54,6 +55,20 @@ int kunit_bus_init(void) return error; } +/* Unregister the 'kunit_bus' in case the KUnit module is unloaded. */ +void kunit_bus_shutdown(void) +{ + /* Make sure the bus exists before we unregister it. */ + if (IS_ERR_OR_NULL(kunit_bus_device)) + return; + + bus_unregister(&kunit_bus_type); + + root_device_unregister(kunit_bus_device); + + kunit_bus_device = NULL; +} + /* Release a 'fake' KUnit device. */ static void kunit_device_release(struct device *d) { @@ -119,6 +134,9 @@ static struct kunit_device *kunit_device_register_internal(struct kunit *test, return ERR_PTR(err); } + kunit_dev->dev.dma_mask = &kunit_dev->dev.coherent_dma_mask; + kunit_dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + kunit_add_action(test, device_unregister_wrapper, &kunit_dev->dev); return kunit_dev; diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 717b9599036b..70b9a43cd257 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -33,13 +33,13 @@ static char *filter_glob_param; static char *filter_param; static char *filter_action_param; -module_param_named(filter_glob, filter_glob_param, charp, 0400); +module_param_named(filter_glob, filter_glob_param, charp, 0600); MODULE_PARM_DESC(filter_glob, "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test"); -module_param_named(filter, filter_param, charp, 0400); +module_param_named(filter, filter_param, charp, 0600); MODULE_PARM_DESC(filter, "Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow"); -module_param_named(filter_action, filter_action_param, charp, 0400); +module_param_named(filter_action, filter_action_param, charp, 0600); MODULE_PARM_DESC(filter_action, "Changes behavior of filtered tests using attributes, valid values are:\n" "<none>: do not run filtered tests as normal\n" @@ -146,6 +146,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set) kfree(suite_set.start); } +/* + * Filter and reallocate test suites. Must return the filtered test suites set + * allocated at a valid virtual address or NULL in case of error. 
+ */ struct kunit_suite_set kunit_filter_suites(const struct kunit_suite_set *suite_set, const char *filter_glob, diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 22d4ee86dbed..3f7f967e3688 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -129,7 +129,7 @@ static void parse_filter_attr_test(struct kunit *test) GFP_KERNEL); for (j = 0; j < filter_count; j++) { parsed_filters[j] = kunit_next_attr_filter(&filter, &err); - KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]); + KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters); } KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed"); diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index c4259d910356..f7980ef236a3 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -720,7 +720,7 @@ static void kunit_device_cleanup_test(struct kunit *test) long action_was_run = 0; test_device = kunit_device_register(test, "my_device"); - KUNIT_ASSERT_NOT_NULL(test, test_device); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device); /* Add an action to verify cleanup. */ devm_add_action(test_device, test_dev_action, &action_was_run); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index f95d2093a0aa..1d1475578515 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -17,6 +17,7 @@ #include <linux/panic.h> #include <linux/sched/debug.h> #include <linux/sched.h> +#include <linux/mm.h> #include "debugfs.h" #include "device-impl.h" @@ -801,12 +802,19 @@ static void kunit_module_exit(struct module *mod) }; const char *action = kunit_action(); + /* + * Check if the start address is a valid virtual address to detect + * if the module load sequence has failed and the suite set has not + * been initialized and filtered. + */ + if (!suite_set.start || !virt_addr_valid(suite_set.start)) + return; + if (!action) __kunit_test_suites_exit(mod->kunit_suites, mod->num_kunit_suites); - if (suite_set.start) - kunit_free_suite_set(suite_set); + kunit_free_suite_set(suite_set); } static int kunit_module_notify(struct notifier_block *nb, unsigned long val, @@ -816,12 +824,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val, switch (val) { case MODULE_STATE_LIVE: + kunit_module_init(mod); break; case MODULE_STATE_GOING: kunit_module_exit(mod); break; case MODULE_STATE_COMING: - kunit_module_init(mod); break; case MODULE_STATE_UNFORMED: break; @@ -920,6 +928,9 @@ static void __exit kunit_exit(void) #ifdef CONFIG_MODULES unregister_module_notifier(&kunit_mod_nb); #endif + + kunit_bus_shutdown(); + kunit_debugfs_cleanup(); } module_exit(kunit_exit); diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile deleted file mode 100644 index dcc912b3478f..000000000000 --- a/lib/livepatch/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for livepatch test code. 
- -obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \ - test_klp_callbacks_demo.o \ - test_klp_callbacks_demo2.o \ - test_klp_callbacks_busy.o \ - test_klp_callbacks_mod.o \ - test_klp_livepatch.o \ - test_klp_shadow_vars.o \ - test_klp_state.o \ - test_klp_state2.o \ - test_klp_state3.o diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c deleted file mode 100644 index 5af7093ca00c..000000000000 --- a/lib/livepatch/test_klp_atomic_replace.c +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -static int replace; -module_param(replace, int, 0644); -MODULE_PARM_DESC(replace, "replace (default=0)"); - -#include <linux/seq_file.h> -static int livepatch_meminfo_proc_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%s: %s\n", THIS_MODULE->name, - "this has been live patched"); - return 0; -} - -static struct klp_func funcs[] = { - { - .old_name = "meminfo_proc_show", - .new_func = livepatch_meminfo_proc_show, - }, {} -}; - -static struct klp_object objs[] = { - { - /* name being NULL means vmlinux */ - .funcs = funcs, - }, {} -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - /* set .replace in the init function below for demo purposes */ -}; - -static int test_klp_atomic_replace_init(void) -{ - patch.replace = replace; - return klp_enable_patch(&patch); -} - -static void test_klp_atomic_replace_exit(void) -{ -} - -module_init(test_klp_atomic_replace_init); -module_exit(test_klp_atomic_replace_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: atomic replace"); diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c deleted file mode 100644 index 133929e0ce8f..000000000000 --- a/lib/livepatch/test_klp_callbacks_busy.c +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/workqueue.h> -#include <linux/delay.h> - -/* load/run-time control from sysfs writer */ -static bool block_transition; -module_param(block_transition, bool, 0644); -MODULE_PARM_DESC(block_transition, "block_transition (default=false)"); - -static void busymod_work_func(struct work_struct *work); -static DECLARE_WORK(work, busymod_work_func); -static DECLARE_COMPLETION(busymod_work_started); - -static void busymod_work_func(struct work_struct *work) -{ - pr_info("%s enter\n", __func__); - complete(&busymod_work_started); - - while (READ_ONCE(block_transition)) { - /* - * Busy-wait until the sysfs writer has acknowledged a - * blocked transition and clears the flag. - */ - msleep(20); - } - - pr_info("%s exit\n", __func__); -} - -static int test_klp_callbacks_busy_init(void) -{ - pr_info("%s\n", __func__); - schedule_work(&work); - - /* - * To synchronize kernel messages, hold the init function from - * exiting until the work function's entry message has printed. - */ - wait_for_completion(&busymod_work_started); - - if (!block_transition) { - /* - * Serialize output: print all messages from the work - * function before returning from init(). 
- */ - flush_work(&work); - } - - return 0; -} - -static void test_klp_callbacks_busy_exit(void) -{ - WRITE_ONCE(block_transition, false); - flush_work(&work); - pr_info("%s\n", __func__); -} - -module_init(test_klp_callbacks_busy_init); -module_exit(test_klp_callbacks_busy_exit); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: busy target module"); diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c deleted file mode 100644 index 3fd8fe1cd1cc..000000000000 --- a/lib/livepatch/test_klp_callbacks_demo.c +++ /dev/null @@ -1,121 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -static int pre_patch_ret; -module_param(pre_patch_ret, int, 0644); -MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)"); - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return pre_patch_ret; -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -static void patched_work_func(struct work_struct *work) -{ - pr_info("%s\n", __func__); -} - -static struct klp_func no_funcs[] = { - {} -}; - -static struct klp_func busymod_funcs[] = { - { - .old_name = "busymod_work_func", - .new_func = patched_work_func, - }, {} -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { - .name = "test_klp_callbacks_mod", - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { - .name = "test_klp_callbacks_busy", - .funcs = busymod_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, -}; - -static int test_klp_callbacks_demo_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo_exit(void) -{ -} - -module_init(test_klp_callbacks_demo_init); 
-module_exit(test_klp_callbacks_demo_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: livepatch demo"); diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c deleted file mode 100644 index 5417573e80af..000000000000 --- a/lib/livepatch/test_klp_callbacks_demo2.c +++ /dev/null @@ -1,93 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -static int replace; -module_param(replace, int, 0644); -MODULE_PARM_DESC(replace, "replace (default=0)"); - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return 0; -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); -} - -static struct klp_func no_funcs[] = { - { } -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - /* set .replace in the init function below for demo purposes */ -}; - -static int test_klp_callbacks_demo2_init(void) -{ - patch.replace = replace; - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo2_exit(void) -{ -} - -module_init(test_klp_callbacks_demo2_init); -module_exit(test_klp_callbacks_demo2_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: livepatch demo2"); diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c deleted file mode 100644 index 8fbe645b1c2c..000000000000 --- a/lib/livepatch/test_klp_callbacks_mod.c +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> - -static int test_klp_callbacks_mod_init(void) -{ - pr_info("%s\n", __func__); - return 0; -} - -static void test_klp_callbacks_mod_exit(void) -{ - pr_info("%s\n", __func__); -} - -module_init(test_klp_callbacks_mod_init); 
-module_exit(test_klp_callbacks_mod_exit); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: target module"); diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c deleted file mode 100644 index aff08199de71..000000000000 --- a/lib/livepatch/test_klp_livepatch.c +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/livepatch.h> - -#include <linux/seq_file.h> -static int livepatch_cmdline_proc_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%s: %s\n", THIS_MODULE->name, - "this has been live patched"); - return 0; -} - -static struct klp_func funcs[] = { - { - .old_name = "cmdline_proc_show", - .new_func = livepatch_cmdline_proc_show, - }, { } -}; - -static struct klp_object objs[] = { - { - /* name being NULL means vmlinux */ - .funcs = funcs, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, -}; - -static int test_klp_livepatch_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_livepatch_exit(void) -{ -} - -module_init(test_klp_livepatch_init); -module_exit(test_klp_livepatch_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: livepatch module"); diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c deleted file mode 100644 index b99116490858..000000000000 --- a/lib/livepatch/test_klp_shadow_vars.c +++ /dev/null @@ -1,301 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com> - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/livepatch.h> -#include <linux/slab.h> - -/* - * Keep a small list of pointers so that we can print address-agnostic - * pointer values. Use a rolling integer count to differentiate the values. - * Ironically we could have used the shadow variable API to do this, but - * let's not lean too heavily on the very code we're testing. - */ -static LIST_HEAD(ptr_list); -struct shadow_ptr { - void *ptr; - int id; - struct list_head list; -}; - -static void free_ptr_list(void) -{ - struct shadow_ptr *sp, *tmp_sp; - - list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) { - list_del(&sp->list); - kfree(sp); - } -} - -static int ptr_id(void *ptr) -{ - struct shadow_ptr *sp; - static int count; - - list_for_each_entry(sp, &ptr_list, list) { - if (sp->ptr == ptr) - return sp->id; - } - - sp = kmalloc(sizeof(*sp), GFP_ATOMIC); - if (!sp) - return -ENOMEM; - sp->ptr = ptr; - sp->id = count++; - - list_add(&sp->list, &ptr_list); - - return sp->id; -} - -/* - * Shadow variable wrapper functions that echo the function and arguments - * to the kernel log for testing verification. Don't display raw pointers, - * but use the ptr_id() value instead. 
- */ -static void *shadow_get(void *obj, unsigned long id) -{ - int **sv; - - sv = klp_shadow_get(obj, id); - pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n", - __func__, ptr_id(obj), id, ptr_id(sv)); - - return sv; -} - -static void *shadow_alloc(void *obj, unsigned long id, size_t size, - gfp_t gfp_flags, klp_shadow_ctor_t ctor, - void *ctor_data) -{ - int **var = ctor_data; - int **sv; - - sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var); - pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n", - __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), - ptr_id(*var), ptr_id(sv)); - - return sv; -} - -static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size, - gfp_t gfp_flags, klp_shadow_ctor_t ctor, - void *ctor_data) -{ - int **var = ctor_data; - int **sv; - - sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var); - pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n", - __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor), - ptr_id(*var), ptr_id(sv)); - - return sv; -} - -static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) -{ - klp_shadow_free(obj, id, dtor); - pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n", - __func__, ptr_id(obj), id, ptr_id(dtor)); -} - -static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) -{ - klp_shadow_free_all(id, dtor); - pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor)); -} - - -/* Shadow variable constructor - remember simple pointer data */ -static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data) -{ - int **sv = shadow_data; - int **var = ctor_data; - - if (!var) - return -EINVAL; - - *sv = *var; - pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var)); - - return 0; -} - -/* - * With more than one item to free in the list, order is not determined and - * shadow_dtor will not be passed to shadow_free_all() which would make the - * test fail. (see pass 6) - */ -static void shadow_dtor(void *obj, void *shadow_data) -{ - int **sv = shadow_data; - - pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n", - __func__, ptr_id(obj), ptr_id(sv)); -} - -/* number of objects we simulate that need shadow vars */ -#define NUM_OBJS 3 - -/* dynamically created obj fields have the following shadow var id values */ -#define SV_ID1 0x1234 -#define SV_ID2 0x1235 - -/* - * The main test case adds/removes new fields (shadow var) to each of these - * test structure instances. The last group of fields in the struct represent - * the idea that shadow variables may be added and removed to and from the - * struct during execution. - */ -struct test_object { - /* add anything here below and avoid to define an empty struct */ - struct shadow_ptr sp; - - /* these represent shadow vars added and removed with SV_ID{1,2} */ - /* char nfield1; */ - /* int nfield2; */ -}; - -static int test_klp_shadow_vars_init(void) -{ - struct test_object objs[NUM_OBJS]; - char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS]; - char *pndup[NUM_OBJS]; - int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS]; - void **sv; - int ret; - int i; - - ptr_id(NULL); - - /* - * With an empty shadow variable hash table, expect not to find - * any matches. 
- */ - sv = shadow_get(&objs[0], SV_ID1); - if (!sv) - pr_info(" got expected NULL result\n"); - - /* pass 1: init & alloc a char+int pair of svars for each objs */ - for (i = 0; i < NUM_OBJS; i++) { - pnfields1[i] = &nfields1[i]; - ptr_id(pnfields1[i]); - - if (i % 2) { - sv1[i] = shadow_alloc(&objs[i], SV_ID1, - sizeof(pnfields1[i]), GFP_KERNEL, - shadow_ctor, &pnfields1[i]); - } else { - sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1, - sizeof(pnfields1[i]), GFP_KERNEL, - shadow_ctor, &pnfields1[i]); - } - if (!sv1[i]) { - ret = -ENOMEM; - goto out; - } - - pnfields2[i] = &nfields2[i]; - ptr_id(pnfields2[i]); - sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]), - GFP_KERNEL, shadow_ctor, &pnfields2[i]); - if (!sv2[i]) { - ret = -ENOMEM; - goto out; - } - } - - /* pass 2: verify we find allocated svars and where they point to */ - for (i = 0; i < NUM_OBJS; i++) { - /* check the "char" svar for all objects */ - sv = shadow_get(&objs[i], SV_ID1); - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv1[i]), ptr_id(*sv1[i])); - - /* check the "int" svar for all objects */ - sv = shadow_get(&objs[i], SV_ID2); - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv2[i]), ptr_id(*sv2[i])); - } - - /* pass 3: verify that 'get_or_alloc' returns already allocated svars */ - for (i = 0; i < NUM_OBJS; i++) { - pndup[i] = &nfields1[i]; - ptr_id(pndup[i]); - - sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]), - GFP_KERNEL, shadow_ctor, &pndup[i]); - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv1[i]), ptr_id(*sv1[i])); - } - - /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */ - for (i = 0; i < NUM_OBJS; i++) { - shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */ - sv = shadow_get(&objs[i], SV_ID1); - if (!sv) - pr_info(" got expected NULL result\n"); - } - - /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */ - for (i = 0; i < NUM_OBJS; i++) { - sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */ - if (!sv) { - ret = -EINVAL; - goto out; - } - if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i]) - pr_info(" got expected PTR%d -> PTR%d result\n", - ptr_id(sv2[i]), ptr_id(*sv2[i])); - } - - /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. 
*/ - shadow_free_all(SV_ID2, NULL); /* 'int' pairs */ - for (i = 0; i < NUM_OBJS; i++) { - sv = shadow_get(&objs[i], SV_ID2); - if (!sv) - pr_info(" got expected NULL result\n"); - } - - free_ptr_list(); - - return 0; -out: - shadow_free_all(SV_ID1, NULL); /* 'char' pairs */ - shadow_free_all(SV_ID2, NULL); /* 'int' pairs */ - free_ptr_list(); - - return ret; -} - -static void test_klp_shadow_vars_exit(void) -{ -} - -module_init(test_klp_shadow_vars_init); -module_exit(test_klp_shadow_vars_exit); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>"); -MODULE_DESCRIPTION("Livepatch test: shadow variables"); diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c deleted file mode 100644 index 57a4253acb01..000000000000 --- a/lib/livepatch/test_klp_state.c +++ /dev/null @@ -1,162 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2019 SUSE - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/printk.h> -#include <linux/livepatch.h> - -#define CONSOLE_LOGLEVEL_STATE 1 -/* Version 1 does not support migration. */ -#define CONSOLE_LOGLEVEL_STATE_VERSION 1 - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -static struct klp_patch patch; - -static int allocate_loglevel_state(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return -EINVAL; - - loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL); - if (!loglevel_state->data) - return -ENOMEM; - - pr_info("%s: allocating space to store console_loglevel\n", - __func__); - return 0; -} - -static void fix_console_loglevel(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: fixing console_loglevel\n", __func__); - *(int *)loglevel_state->data = console_loglevel; - console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; -} - -static void restore_console_loglevel(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: restoring console_loglevel\n", __func__); - console_loglevel = *(int *)loglevel_state->data; -} - -static void free_loglevel_state(void) -{ - struct klp_state *loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: freeing space for the stored console_loglevel\n", - __func__); - kfree(loglevel_state->data); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return allocate_loglevel_state(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - fix_console_loglevel(); -} - -/* Executed on object 
unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - restore_console_loglevel(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - free_loglevel_state(); -} - -static struct klp_func no_funcs[] = { - {} -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_state states[] = { - { - .id = CONSOLE_LOGLEVEL_STATE, - .version = CONSOLE_LOGLEVEL_STATE_VERSION, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - .states = states, - .replace = true, -}; - -static int test_klp_callbacks_demo_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo_exit(void) -{ -} - -module_init(test_klp_callbacks_demo_init); -module_exit(test_klp_callbacks_demo_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>"); -MODULE_DESCRIPTION("Livepatch test: system state modification"); diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c deleted file mode 100644 index c978ea4d5e67..000000000000 --- a/lib/livepatch/test_klp_state2.c +++ /dev/null @@ -1,191 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2019 SUSE - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/printk.h> -#include <linux/livepatch.h> - -#define CONSOLE_LOGLEVEL_STATE 1 -/* Version 2 supports migration. 
*/ -#define CONSOLE_LOGLEVEL_STATE_VERSION 2 - -static const char *const module_state[] = { - [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state", - [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init", - [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away", - [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up", -}; - -static void callback_info(const char *callback, struct klp_object *obj) -{ - if (obj->mod) - pr_info("%s: %s -> %s\n", callback, obj->mod->name, - module_state[obj->mod->state]); - else - pr_info("%s: vmlinux\n", callback); -} - -static struct klp_patch patch; - -static int allocate_loglevel_state(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: space to store console_loglevel already allocated\n", - __func__); - return 0; - } - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return -EINVAL; - - loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL); - if (!loglevel_state->data) - return -ENOMEM; - - pr_info("%s: allocating space to store console_loglevel\n", - __func__); - return 0; -} - -static void fix_console_loglevel(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: taking over the console_loglevel change\n", - __func__); - loglevel_state->data = prev_loglevel_state->data; - return; - } - - pr_info("%s: fixing console_loglevel\n", __func__); - *(int *)loglevel_state->data = console_loglevel; - console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; -} - -static void restore_console_loglevel(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: passing the console_loglevel change back to the old livepatch\n", - __func__); - return; - } - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: restoring console_loglevel\n", __func__); - console_loglevel = *(int *)loglevel_state->data; -} - -static void free_loglevel_state(void) -{ - struct klp_state *loglevel_state, *prev_loglevel_state; - - prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE); - if (prev_loglevel_state) { - pr_info("%s: keeping space to store console_loglevel\n", - __func__); - return; - } - - loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE); - if (!loglevel_state) - return; - - pr_info("%s: freeing space for the stored console_loglevel\n", - __func__); - kfree(loglevel_state->data); -} - -/* Executed on object patching (ie, patch enablement) */ -static int pre_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - return allocate_loglevel_state(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void post_patch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - fix_console_loglevel(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void pre_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - restore_console_loglevel(); -} - -/* Executed on object unpatching (ie, patch disablement) */ -static void 
post_unpatch_callback(struct klp_object *obj) -{ - callback_info(__func__, obj); - free_loglevel_state(); -} - -static struct klp_func no_funcs[] = { - {} -}; - -static struct klp_object objs[] = { - { - .name = NULL, /* vmlinux */ - .funcs = no_funcs, - .callbacks = { - .pre_patch = pre_patch_callback, - .post_patch = post_patch_callback, - .pre_unpatch = pre_unpatch_callback, - .post_unpatch = post_unpatch_callback, - }, - }, { } -}; - -static struct klp_state states[] = { - { - .id = CONSOLE_LOGLEVEL_STATE, - .version = CONSOLE_LOGLEVEL_STATE_VERSION, - }, { } -}; - -static struct klp_patch patch = { - .mod = THIS_MODULE, - .objs = objs, - .states = states, - .replace = true, -}; - -static int test_klp_callbacks_demo_init(void) -{ - return klp_enable_patch(&patch); -} - -static void test_klp_callbacks_demo_exit(void) -{ -} - -module_init(test_klp_callbacks_demo_init); -module_exit(test_klp_callbacks_demo_exit); -MODULE_LICENSE("GPL"); -MODULE_INFO(livepatch, "Y"); -MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>"); -MODULE_DESCRIPTION("Livepatch test: system state modification"); diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c deleted file mode 100644 index 9226579d10c5..000000000000 --- a/lib/livepatch/test_klp_state3.c +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2019 SUSE - -/* The console loglevel fix is the same in the next cumulative patch. */ -#include "test_klp_state2.c" diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 6f241bb38799..af0970288727 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4290,6 +4290,56 @@ exists: } +/** + * mas_alloc_cyclic() - Internal call to find somewhere to store an entry + * @mas: The maple state. + * @startp: Pointer to ID. + * @range_lo: Lower bound of range to search. + * @range_hi: Upper bound of range to search. + * @entry: The entry to store. + * @next: Pointer to next ID to allocate. + * @gfp: The GFP_FLAGS to use for allocations. + * + * Return: 0 if the allocation succeeded without wrapping, 1 if the + * allocation succeeded after wrapping, or -EBUSY if there are no + * free entries. + */ +int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp, + void *entry, unsigned long range_lo, unsigned long range_hi, + unsigned long *next, gfp_t gfp) +{ + unsigned long min = range_lo; + int ret = 0; + + range_lo = max(min, *next); + ret = mas_empty_area(mas, range_lo, range_hi, 1); + if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) { + mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED; + ret = 1; + } + if (ret < 0 && range_lo > min) { + ret = mas_empty_area(mas, min, range_hi, 1); + if (ret == 0) + ret = 1; + } + if (ret < 0) + return ret; + + do { + mas_insert(mas, entry); + } while (mas_nomem(mas, gfp)); + if (mas_is_err(mas)) + return xa_err(mas->node); + + *startp = mas->index; + *next = *startp + 1; + if (*next == 0) + mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED; + + return ret; +} +EXPORT_SYMBOL(mas_alloc_cyclic); + static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index) { retry: @@ -6443,6 +6493,49 @@ unlock: } EXPORT_SYMBOL(mtree_alloc_range); +/** + * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree. + * @mt: The maple tree. + * @startp: Pointer to ID. + * @range_lo: Lower bound of range to search. + * @range_hi: Upper bound of range to search. + * @entry: The entry to store. + * @next: Pointer to next ID to allocate. + * @gfp: The GFP_FLAGS to use for allocations. 
+ * + * Finds an empty entry in @mt after @next, stores the new index into + * the @id pointer, stores the entry at that index, then updates @next. + * + * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag. + * + * Context: Any context. Takes and releases the mt.lock. May sleep if + * the @gfp flags permit. + * + * Return: 0 if the allocation succeeded without wrapping, 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no + * free entries. + */ +int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp, + void *entry, unsigned long range_lo, unsigned long range_hi, + unsigned long *next, gfp_t gfp) +{ + int ret; + + MA_STATE(mas, mt, 0, 0); + + if (!mt_is_alloc(mt)) + return -EINVAL; + if (WARN_ON_ONCE(mt_is_reserved(entry))) + return -EINVAL; + mtree_lock(mt); + ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi, + next, gfp); + mtree_unlock(mt); + return ret; +} +EXPORT_SYMBOL(mtree_alloc_cyclic); + int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, void *entry, unsigned long size, unsigned long min, unsigned long max, gfp_t gfp) diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c index 440aee705ccc..30e00ef0bf2e 100644 --- a/lib/memcpy_kunit.c +++ b/lib/memcpy_kunit.c @@ -32,7 +32,7 @@ struct some_bytes { BUILD_BUG_ON(sizeof(instance.data) != 32); \ for (size_t i = 0; i < sizeof(instance.data); i++) { \ KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \ - "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \ + "line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \ __LINE__, #instance, v, i, instance.data[i]); \ } \ } while (0) @@ -41,7 +41,7 @@ struct some_bytes { BUILD_BUG_ON(sizeof(one) != sizeof(two)); \ for (size_t i = 0; i < sizeof(one); i++) { \ KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \ - "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \ + "line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \ __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \ } \ kunit_info(test, "ok: " TEST_OP "() " name "\n"); \ diff --git a/lib/nlattr.c b/lib/nlattr.c index ed2ab43e1b22..be9c576b6e2d 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = { [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), + [NLA_BE16] = sizeof(__be16), + [NLA_BE32] = sizeof(__be32), }; static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { @@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_S16] = sizeof(s16), [NLA_S32] = sizeof(s32), [NLA_S64] = sizeof(s64), + [NLA_BE16] = sizeof(__be16), + [NLA_BE32] = sizeof(__be32), }; /* diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc index cd0b9e95f499..863e2d320938 100644 --- a/lib/raid6/s390vx.uc +++ b/lib/raid6/s390vx.uc @@ -12,15 +12,14 @@ */ #include <linux/raid/pq.h> -#include <asm/fpu/api.h> -#include <asm/vx-insn.h> +#include <asm/fpu.h> #define NSIZE 16 -static inline void LOAD_CONST(void) +static __always_inline void LOAD_CONST(void) { - asm volatile("VREPIB %v24,7"); - asm volatile("VREPIB %v25,0x1d"); + fpu_vrepib(24, 0x07); + fpu_vrepib(25, 0x1d); } /* @@ -28,10 +27,7 @@ static inline void LOAD_CONST(void) * vector register y left by 1 bit and stores the result in * vector register x. 
*/ -static inline void SHLBYTE(int x, int y) -{ - asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y)); -} +#define SHLBYTE(x, y) fpu_vab(x, y, y) /* * For each of the 16 bytes in the vector register y the MASK() @@ -39,49 +35,17 @@ static inline void SHLBYTE(int x, int y) * or 0x00 if the high bit is 0. The result is stored in vector * register x. */ -static inline void MASK(int x, int y) -{ - asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y)); -} - -static inline void AND(int x, int y, int z) -{ - asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z)); -} - -static inline void XOR(int x, int y, int z) -{ - asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z)); -} +#define MASK(x, y) fpu_vesravb(x, y, 24) -static inline void LOAD_DATA(int x, u8 *ptr) -{ - typedef struct { u8 _[16 * $#]; } addrtype; - register addrtype *__ptr asm("1") = (addrtype *) ptr; - - asm volatile ("VLM %2,%3,0,%1" - : : "m" (*__ptr), "a" (__ptr), "i" (x), - "i" (x + $# - 1)); -} - -static inline void STORE_DATA(int x, u8 *ptr) -{ - typedef struct { u8 _[16 * $#]; } addrtype; - register addrtype *__ptr asm("1") = (addrtype *) ptr; - - asm volatile ("VSTM %2,%3,0,1" - : "=m" (*__ptr) : "a" (__ptr), "i" (x), - "i" (x + $# - 1)); -} - -static inline void COPY_VEC(int x, int y) -{ - asm volatile ("VLR %0,%1" : : "i" (x), "i" (y)); -} +#define AND(x, y, z) fpu_vn(x, y, z) +#define XOR(x, y, z) fpu_vx(x, y, z) +#define LOAD_DATA(x, ptr) fpu_vlm(x, x + $# - 1, ptr) +#define STORE_DATA(x, ptr) fpu_vstm(x, x + $# - 1, ptr) +#define COPY_VEC(x, y) fpu_vlr(x, y) static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs) { - struct kernel_fpu vxstate; + DECLARE_KERNEL_FPU_ONSTACK32(vxstate); u8 **dptr, *p, *q; int d, z, z0; @@ -114,7 +78,7 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs) static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop, size_t bytes, void **ptrs) { - struct kernel_fpu vxstate; + DECLARE_KERNEL_FPU_ONSTACK32(vxstate); u8 **dptr, *p, *q; int d, z, z0; diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 010c730ca7fc..f3f3436d60a9 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -13,16 +13,26 @@ * seq_buf_init() more than once to reset the seq_buf to start * from scratch. */ -#include <linux/uaccess.h> -#include <linux/seq_file.h> + +#include <linux/bug.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/hex.h> +#include <linux/minmax.h> +#include <linux/printk.h> #include <linux/seq_buf.h> +#include <linux/seq_file.h> +#include <linux/sprintf.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/uaccess.h> /** * seq_buf_can_fit - can the new data fit in the current buffer? * @s: the seq_buf descriptor * @len: The length to see if it can fit in the current buffer * - * Returns true if there's enough unused space in the seq_buf buffer + * Returns: true if there's enough unused space in the seq_buf buffer * to fit the amount of new data according to @len. */ static bool seq_buf_can_fit(struct seq_buf *s, size_t len) @@ -35,7 +45,7 @@ static bool seq_buf_can_fit(struct seq_buf *s, size_t len) * @m: the seq_file descriptor that is the destination * @s: the seq_buf descriptor that is the source. * - * Returns zero on success, non zero otherwise + * Returns: zero on success, non-zero otherwise. 
*/ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s) { @@ -50,9 +60,9 @@ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s) * @fmt: printf format string * @args: va_list of arguments from a printf() type function * - * Writes a vnprintf() format into the sequencce buffer. + * Writes a vnprintf() format into the sequence buffer. * - * Returns zero on success, -1 on overflow. + * Returns: zero on success, -1 on overflow. */ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) { @@ -78,7 +88,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) * * Writes a printf() format into the sequence buffer. * - * Returns zero on success, -1 on overflow. + * Returns: zero on success, -1 on overflow. */ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) { @@ -94,12 +104,12 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) EXPORT_SYMBOL_GPL(seq_buf_printf); /** - * seq_buf_do_printk - printk seq_buf line by line + * seq_buf_do_printk - printk() seq_buf line by line * @s: seq_buf descriptor * @lvl: printk level * * printk()-s a multi-line sequential buffer line by line. The function - * makes sure that the buffer in @s is nul terminated and safe to read + * makes sure that the buffer in @s is NUL-terminated and safe to read * as a string. */ void seq_buf_do_printk(struct seq_buf *s, const char *lvl) @@ -139,7 +149,7 @@ EXPORT_SYMBOL_GPL(seq_buf_do_printk); * This function will take the format and the binary array and finish * the conversion into the ASCII string within the buffer. * - * Returns zero on success, -1 on overflow. + * Returns: zero on success, -1 on overflow. */ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) { @@ -167,7 +177,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) * * Copy a simple string into the sequence buffer. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_puts(struct seq_buf *s, const char *str) { @@ -196,7 +206,7 @@ EXPORT_SYMBOL_GPL(seq_buf_puts); * * Copy a single character into the sequence buffer. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_putc(struct seq_buf *s, unsigned char c) { @@ -212,7 +222,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c) EXPORT_SYMBOL_GPL(seq_buf_putc); /** - * seq_buf_putmem - write raw data into the sequenc buffer + * seq_buf_putmem - write raw data into the sequence buffer * @s: seq_buf descriptor * @mem: The raw memory to copy into the buffer * @len: The length of the raw memory to copy (in bytes) @@ -221,7 +231,7 @@ EXPORT_SYMBOL_GPL(seq_buf_putc); * buffer and a strcpy() would not work. Using this function allows * for such cases. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len) { @@ -249,7 +259,7 @@ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len) * raw memory into the buffer it writes its ASCII representation of it * in hex characters. * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, unsigned int len) @@ -297,7 +307,7 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, * * Write a path name into the sequence buffer. 
* - * Returns the number of written bytes on success, -1 on overflow + * Returns: the number of written bytes on success, -1 on overflow. */ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc) { @@ -332,6 +342,7 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc) * or until it reaches the end of the content in the buffer (@s->len), * whichever comes first. * + * Returns: * On success, it returns a positive number of the number of bytes * it copied. * @@ -382,11 +393,11 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt) * linebuf size is maximal length for one line. * 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for * separating space - * 2 - spaces separating hex dump and ascii representation - * 32 - ascii representation + * 2 - spaces separating hex dump and ASCII representation + * 32 - ASCII representation * 1 - terminating '\0' * - * Returns zero on success, -1 on overflow + * Returns: zero on success, -1 on overflow. */ int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type, int rowsize, int groupsize, diff --git a/lib/stackdepot.c b/lib/stackdepot.c index a0be5d05c7f0..4a7055a63d9f 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -14,6 +14,7 @@ #define pr_fmt(fmt) "stackdepot: " fmt +#include <linux/debugfs.h> #include <linux/gfp.h> #include <linux/jhash.h> #include <linux/kernel.h> @@ -21,8 +22,10 @@ #include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> -#include <linux/percpu.h> +#include <linux/poison.h> #include <linux/printk.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> #include <linux/refcount.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -41,17 +44,7 @@ #define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN) #define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \ STACK_DEPOT_EXTRA_BITS) -#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32 -/* - * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack - * traces. As KMSAN does not support evicting stack traces from the stack - * depot, the stack depot capacity might be reached quickly with large stack - * records. Adjust the maximum number of stack depot pools for this case. - */ -#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16)) -#else #define DEPOT_POOLS_CAP 8192 -#endif #define DEPOT_MAX_POOLS \ (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \ (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP) @@ -67,17 +60,30 @@ union handle_parts { }; struct stack_record { - struct list_head list; /* Links in hash table or freelist */ + struct list_head hash_list; /* Links in the hash table */ u32 hash; /* Hash in hash table */ u32 size; /* Number of stored frames */ - union handle_parts handle; + union handle_parts handle; /* Constant after initialization */ refcount_t count; - unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */ + union { + unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */ + struct { + /* + * An important invariant of the implementation is to + * only place a stack record onto the freelist iff its + * refcount is zero. Because stack records with a zero + * refcount are never considered as valid, it is safe to + * union @entries and freelist management state below. 
+ * Conversely, as soon as an entry is off the freelist + * and its refcount becomes non-zero, the below must not + * be accessed until being placed back on the freelist. + */ + struct list_head free_list; /* Links in the freelist */ + unsigned long rcu_state; /* RCU cookie */ + }; + }; }; -#define DEPOT_STACK_RECORD_SIZE \ - ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN) - static bool stack_depot_disabled; static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); static bool __stack_depot_early_init_passed __initdata; @@ -103,17 +109,33 @@ static void *stack_pools[DEPOT_MAX_POOLS]; static void *new_pool; /* Number of pools in stack_pools. */ static int pools_num; +/* Offset to the unused space in the currently used pool. */ +static size_t pool_offset = DEPOT_POOL_SIZE; /* Freelist of stack records within stack_pools. */ static LIST_HEAD(free_stacks); -/* - * Stack depot tries to keep an extra pool allocated even before it runs out - * of space in the currently used pool. This flag marks whether this extra pool - * needs to be allocated. It has the value 0 when either an extra pool is not - * yet allocated or if the limit on the number of pools is reached. - */ -static bool new_pool_required = true; -/* Lock that protects the variables above. */ -static DEFINE_RWLOCK(pool_rwlock); +/* The lock must be held when performing pool or freelist modifications. */ +static DEFINE_RAW_SPINLOCK(pool_lock); + +/* Statistics counters for debugfs. */ +enum depot_counter_id { + DEPOT_COUNTER_REFD_ALLOCS, + DEPOT_COUNTER_REFD_FREES, + DEPOT_COUNTER_REFD_INUSE, + DEPOT_COUNTER_FREELIST_SIZE, + DEPOT_COUNTER_PERSIST_COUNT, + DEPOT_COUNTER_PERSIST_BYTES, + DEPOT_COUNTER_COUNT, +}; +static long counters[DEPOT_COUNTER_COUNT]; +static const char *const counter_names[] = { + [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations", + [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees", + [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use", + [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size", + [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count", + [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes", +}; +static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT); static int __init disable_stack_depot(char *str) { @@ -258,174 +280,273 @@ out_unlock: } EXPORT_SYMBOL_GPL(stack_depot_init); -/* Initializes a stack depol pool. */ -static void depot_init_pool(void *pool) +/* + * Initializes new stack pool, and updates the list of pools. + */ +static bool depot_init_pool(void **prealloc) { - int offset; + lockdep_assert_held(&pool_lock); - lockdep_assert_held_write(&pool_rwlock); + if (unlikely(pools_num >= DEPOT_MAX_POOLS)) { + /* Bail out if we reached the pool limit. */ + WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */ + WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */ + WARN_ONCE(1, "Stack depot reached limit capacity"); + return false; + } - WARN_ON(!list_empty(&free_stacks)); + if (!new_pool && *prealloc) { + /* We have preallocated memory, use it. */ + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; + } - /* Initialize handles and link stack records into the freelist. 
*/ - for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE; - offset += DEPOT_STACK_RECORD_SIZE) { - struct stack_record *stack = pool + offset; + if (!new_pool) + return false; /* new_pool and *prealloc are NULL */ - stack->handle.pool_index = pools_num; - stack->handle.offset = offset >> DEPOT_STACK_ALIGN; - stack->handle.extra = 0; + /* Save reference to the pool to be used by depot_fetch_stack(). */ + stack_pools[pools_num] = new_pool; - list_add(&stack->list, &free_stacks); - } + /* + * Stack depot tries to keep an extra pool allocated even before it runs + * out of space in the currently used pool. + * + * To indicate that a new preallocation is needed new_pool is reset to + * NULL; do not reset to NULL if we have reached the maximum number of + * pools. + */ + if (pools_num < DEPOT_MAX_POOLS) + WRITE_ONCE(new_pool, NULL); + else + WRITE_ONCE(new_pool, STACK_DEPOT_POISON); - /* Save reference to the pool to be used by depot_fetch_stack(). */ - stack_pools[pools_num] = pool; - pools_num++; + /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */ + WRITE_ONCE(pools_num, pools_num + 1); + ASSERT_EXCLUSIVE_WRITER(pools_num); + + pool_offset = 0; + + return true; } /* Keeps the preallocated memory to be used for a new stack depot pool. */ static void depot_keep_new_pool(void **prealloc) { - lockdep_assert_held_write(&pool_rwlock); + lockdep_assert_held(&pool_lock); /* * If a new pool is already saved or the maximum number of * pools is reached, do not use the preallocated memory. */ - if (!new_pool_required) + if (new_pool) return; - /* - * Use the preallocated memory for the new pool - * as long as we do not exceed the maximum number of pools. - */ - if (pools_num < DEPOT_MAX_POOLS) { - new_pool = *prealloc; - *prealloc = NULL; + WRITE_ONCE(new_pool, *prealloc); + *prealloc = NULL; +} + +/* + * Try to initialize a new stack record from the current pool, a cached pool, or + * the current pre-allocation. + */ +static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size) +{ + struct stack_record *stack; + void *current_pool; + u32 pool_index; + + lockdep_assert_held(&pool_lock); + + if (pool_offset + size > DEPOT_POOL_SIZE) { + if (!depot_init_pool(prealloc)) + return NULL; } - /* - * At this point, either a new pool is kept or the maximum - * number of pools is reached. In either case, take note that - * keeping another pool is not required. - */ - new_pool_required = false; + if (WARN_ON_ONCE(pools_num < 1)) + return NULL; + pool_index = pools_num - 1; + current_pool = stack_pools[pool_index]; + if (WARN_ON_ONCE(!current_pool)) + return NULL; + + stack = current_pool + pool_offset; + + /* Pre-initialize handle once. */ + stack->handle.pool_index = pool_index; + stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; + stack->handle.extra = 0; + INIT_LIST_HEAD(&stack->hash_list); + + pool_offset += size; + + return stack; } -/* Updates references to the current and the next stack depot pools. */ -static bool depot_update_pools(void **prealloc) +/* Try to find next free usable entry from the freelist. */ +static struct stack_record *depot_pop_free(void) { - lockdep_assert_held_write(&pool_rwlock); + struct stack_record *stack; - /* Check if we still have objects in the freelist. */ - if (!list_empty(&free_stacks)) - goto out_keep_prealloc; + lockdep_assert_held(&pool_lock); - /* Check if we have a new pool saved and use it. 
*/ - if (new_pool) { - depot_init_pool(new_pool); - new_pool = NULL; + if (list_empty(&free_stacks)) + return NULL; - /* Take note that we might need a new new_pool. */ - if (pools_num < DEPOT_MAX_POOLS) - new_pool_required = true; + /* + * We maintain the invariant that the elements in front are least + * recently used, and are therefore more likely to be associated with an + * RCU grace period in the past. Consequently it is sufficient to only + * check the first entry. + */ + stack = list_first_entry(&free_stacks, struct stack_record, free_list); + if (!poll_state_synchronize_rcu(stack->rcu_state)) + return NULL; - /* Try keeping the preallocated memory for new_pool. */ - goto out_keep_prealloc; - } + list_del(&stack->free_list); + counters[DEPOT_COUNTER_FREELIST_SIZE]--; - /* Bail out if we reached the pool limit. */ - if (unlikely(pools_num >= DEPOT_MAX_POOLS)) { - WARN_ONCE(1, "Stack depot reached limit capacity"); - return false; - } + return stack; +} - /* Check if we have preallocated memory and use it. */ - if (*prealloc) { - depot_init_pool(*prealloc); - *prealloc = NULL; - return true; - } +static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries) +{ + const size_t used = flex_array_size(s, entries, nr_entries); + const size_t unused = sizeof(s->entries) - used; - return false; + WARN_ON_ONCE(sizeof(s->entries) < used); -out_keep_prealloc: - /* Keep the preallocated memory for a new pool if required. */ - if (*prealloc) - depot_keep_new_pool(prealloc); - return true; + return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN); } /* Allocates a new stack in a stack depot pool. */ static struct stack_record * -depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc) +depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc) { - struct stack_record *stack; + struct stack_record *stack = NULL; + size_t record_size; - lockdep_assert_held_write(&pool_rwlock); + lockdep_assert_held(&pool_lock); - /* Update current and new pools if required and possible. */ - if (!depot_update_pools(prealloc)) + /* This should already be checked by public API entry points. */ + if (WARN_ON_ONCE(!nr_entries)) return NULL; - /* Check if we have a stack record to save the stack trace. */ - if (list_empty(&free_stacks)) - return NULL; + /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */ + if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES) + nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES; - /* Get and unlink the first entry from the freelist. */ - stack = list_first_entry(&free_stacks, struct stack_record, list); - list_del(&stack->list); + if (flags & STACK_DEPOT_FLAG_GET) { + /* + * Evictable entries have to allocate the max. size so they may + * safely be re-used by differently sized allocations. + */ + record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES); + stack = depot_pop_free(); + } else { + record_size = depot_stack_record_size(stack, nr_entries); + } - /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */ - if (size > CONFIG_STACKDEPOT_MAX_FRAMES) - size = CONFIG_STACKDEPOT_MAX_FRAMES; + if (!stack) { + stack = depot_pop_free_pool(prealloc, record_size); + if (!stack) + return NULL; + } /* Save the stack trace. */ stack->hash = hash; - stack->size = size; - /* stack->handle is already filled in by depot_init_pool(). 
*/ - refcount_set(&stack->count, 1); - memcpy(stack->entries, entries, flex_array_size(stack, entries, size)); + stack->size = nr_entries; + /* stack->handle is already filled in by depot_pop_free_pool(). */ + memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries)); + + if (flags & STACK_DEPOT_FLAG_GET) { + refcount_set(&stack->count, 1); + counters[DEPOT_COUNTER_REFD_ALLOCS]++; + counters[DEPOT_COUNTER_REFD_INUSE]++; + } else { + /* Warn on attempts to switch to refcounting this entry. */ + refcount_set(&stack->count, REFCOUNT_SATURATED); + counters[DEPOT_COUNTER_PERSIST_COUNT]++; + counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size; + } /* * Let KMSAN know the stored stack record is initialized. This shall * prevent false positive reports if instrumented code accesses it. */ - kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE); + kmsan_unpoison_memory(stack, record_size); return stack; } static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle) { + const int pools_num_cached = READ_ONCE(pools_num); union handle_parts parts = { .handle = handle }; void *pool; size_t offset = parts.offset << DEPOT_STACK_ALIGN; struct stack_record *stack; - lockdep_assert_held(&pool_rwlock); + lockdep_assert_not_held(&pool_lock); - if (parts.pool_index > pools_num) { + if (parts.pool_index > pools_num_cached) { WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n", - parts.pool_index, pools_num, handle); + parts.pool_index, pools_num_cached, handle); return NULL; } pool = stack_pools[parts.pool_index]; - if (!pool) + if (WARN_ON(!pool)) return NULL; stack = pool + offset; + if (WARN_ON(!refcount_read(&stack->count))) + return NULL; + return stack; } /* Links stack into the freelist. */ static void depot_free_stack(struct stack_record *stack) { - lockdep_assert_held_write(&pool_rwlock); + unsigned long flags; + + lockdep_assert_not_held(&pool_lock); + + raw_spin_lock_irqsave(&pool_lock, flags); + printk_deferred_enter(); - list_add(&stack->list, &free_stacks); + /* + * Remove the entry from the hash list. Concurrent list traversal may + * still observe the entry, but since the refcount is zero, this entry + * will no longer be considered as valid. + */ + list_del_rcu(&stack->hash_list); + + /* + * Due to being used from constrained contexts such as the allocators, + * NMI, or even RCU itself, stack depot cannot rely on primitives that + * would sleep (such as synchronize_rcu()) or recursively call into + * stack depot again (such as call_rcu()). + * + * Instead, get an RCU cookie, so that we can ensure this entry isn't + * moved onto another list until the next grace period, and concurrent + * RCU list traversal remains safe. + */ + stack->rcu_state = get_state_synchronize_rcu(); + + /* + * Add the entry to the freelist tail, so that older entries are + * considered first - their RCU cookie is more likely to no longer be + * associated with the current grace period. + */ + list_add_tail(&stack->free_list, &free_stacks); + + counters[DEPOT_COUNTER_FREELIST_SIZE]++; + counters[DEPOT_COUNTER_REFD_FREES]++; + counters[DEPOT_COUNTER_REFD_INUSE]--; + + printk_deferred_exit(); + raw_spin_unlock_irqrestore(&pool_lock, flags); } /* Calculates the hash for a stack. */ @@ -453,22 +574,52 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2, /* Finds a stack in a bucket of the hash table. 
*/ static inline struct stack_record *find_stack(struct list_head *bucket, - unsigned long *entries, int size, - u32 hash) + unsigned long *entries, int size, + u32 hash, depot_flags_t flags) { - struct list_head *pos; - struct stack_record *found; + struct stack_record *stack, *ret = NULL; - lockdep_assert_held(&pool_rwlock); + /* + * Stack depot may be used from instrumentation that instruments RCU or + * tracing itself; use variant that does not call into RCU and cannot be + * traced. + * + * Note: Such use cases must take care when using refcounting to evict + * unused entries, because the stack record free-then-reuse code paths + * do call into RCU. + */ + rcu_read_lock_sched_notrace(); + + list_for_each_entry_rcu(stack, bucket, hash_list) { + if (stack->hash != hash || stack->size != size) + continue; + + /* + * This may race with depot_free_stack() accessing the freelist + * management state unioned with @entries. The refcount is zero + * in that case and the below refcount_inc_not_zero() will fail. + */ + if (data_race(stackdepot_memcmp(entries, stack->entries, size))) + continue; + + /* + * Try to increment refcount. If this succeeds, the stack record + * is valid and has not yet been freed. + * + * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior + * to then call stack_depot_put() later, and we can assume that + * a stack record is never placed back on the freelist. + */ + if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count)) + continue; - list_for_each(pos, bucket) { - found = list_entry(pos, struct stack_record, list); - if (found->hash == hash && - found->size == size && - !stackdepot_memcmp(entries, found->entries, size)) - return found; + ret = stack; + break; } - return NULL; + + rcu_read_unlock_sched_notrace(); + + return ret; } depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, @@ -482,7 +633,6 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, struct page *page = NULL; void *prealloc = NULL; bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC; - bool need_alloc = false; unsigned long flags; u32 hash; @@ -505,31 +655,16 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, hash = hash_stack(entries, nr_entries); bucket = &stack_table[hash & stack_hash_mask]; - read_lock_irqsave(&pool_rwlock, flags); - printk_deferred_enter(); - - /* Fast path: look the stack trace up without full locking. */ - found = find_stack(bucket, entries, nr_entries, hash); - if (found) { - if (depot_flags & STACK_DEPOT_FLAG_GET) - refcount_inc(&found->count); - printk_deferred_exit(); - read_unlock_irqrestore(&pool_rwlock, flags); + /* Fast path: look the stack trace up without locking. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); + if (found) goto exit; - } - - /* Take note if another stack pool needs to be allocated. */ - if (new_pool_required) - need_alloc = true; - - printk_deferred_exit(); - read_unlock_irqrestore(&pool_rwlock, flags); /* * Allocate memory for a new pool if required now: * we won't be able to do that under the lock. */ - if (unlikely(can_alloc && need_alloc)) { + if (unlikely(can_alloc && !READ_ONCE(new_pool))) { /* * Zero out zone modifiers, as we don't have specific zone * requirements. 
Keep the flags related to allocation in atomic @@ -543,31 +678,36 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, prealloc = page_address(page); } - write_lock_irqsave(&pool_rwlock, flags); + raw_spin_lock_irqsave(&pool_lock, flags); printk_deferred_enter(); - found = find_stack(bucket, entries, nr_entries, hash); + /* Try to find again, to avoid concurrently inserting duplicates. */ + found = find_stack(bucket, entries, nr_entries, hash, depot_flags); if (!found) { struct stack_record *new = - depot_alloc_stack(entries, nr_entries, hash, &prealloc); + depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc); if (new) { - list_add(&new->list, bucket); + /* + * This releases the stack record into the bucket and + * makes it visible to readers in find_stack(). + */ + list_add_rcu(&new->hash_list, bucket); found = new; } - } else { - if (depot_flags & STACK_DEPOT_FLAG_GET) - refcount_inc(&found->count); + } + + if (prealloc) { /* - * Stack depot already contains this stack trace, but let's - * keep the preallocated memory for future. + * Either stack depot already contains this stack trace, or + * depot_alloc_stack() did not consume the preallocated memory. + * Try to keep the preallocated memory for future. */ - if (prealloc) - depot_keep_new_pool(&prealloc); + depot_keep_new_pool(&prealloc); } printk_deferred_exit(); - write_unlock_irqrestore(&pool_rwlock, flags); + raw_spin_unlock_irqrestore(&pool_lock, flags); exit: if (prealloc) { /* Stack depot didn't use this memory, free it. */ @@ -592,7 +732,6 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries) { struct stack_record *stack; - unsigned long flags; *entries = NULL; /* @@ -604,13 +743,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, if (!handle || stack_depot_disabled) return 0; - read_lock_irqsave(&pool_rwlock, flags); - printk_deferred_enter(); - stack = depot_fetch_stack(handle); - - printk_deferred_exit(); - read_unlock_irqrestore(&pool_rwlock, flags); + /* + * Should never be NULL, otherwise this is a use-after-put (or just a + * corrupt handle). + */ + if (WARN(!stack, "corrupt handle or use after stack_depot_put()")) + return 0; *entries = stack->entries; return stack->size; @@ -620,29 +759,20 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch); void stack_depot_put(depot_stack_handle_t handle) { struct stack_record *stack; - unsigned long flags; if (!handle || stack_depot_disabled) return; - write_lock_irqsave(&pool_rwlock, flags); - printk_deferred_enter(); - stack = depot_fetch_stack(handle); - if (WARN_ON(!stack)) - goto out; - - if (refcount_dec_and_test(&stack->count)) { - /* Unlink stack from the hash table. */ - list_del(&stack->list); + /* + * Should always be able to find the stack record, otherwise this is an + * unbalanced put attempt (or corrupt handle). + */ + if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()")) + return; - /* Free stack. */ + if (refcount_dec_and_test(&stack->count)) depot_free_stack(stack); - } - -out: - printk_deferred_exit(); - write_unlock_irqrestore(&pool_rwlock, flags); } EXPORT_SYMBOL_GPL(stack_depot_put); @@ -690,3 +820,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle) return parts.extra; } EXPORT_SYMBOL(stack_depot_get_extra_bits); + +static int stats_show(struct seq_file *seq, void *v) +{ + /* + * data race ok: These are just statistics counters, and approximate + * statistics are ok for debugging. 
+ */ + seq_printf(seq, "pools: %d\n", data_race(pools_num)); + for (int i = 0; i < DEPOT_COUNTER_COUNT; i++) + seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i])); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stats); + +static int depot_debugfs_init(void) +{ + struct dentry *dir; + + if (stack_depot_disabled) + return 0; + + dir = debugfs_create_dir("stackdepot", NULL); + debugfs_create_file("stats", 0444, dir, NULL, &stats_fops); + return 0; +} +late_initcall(depot_debugfs_init); diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 29185ac5c727..399380db449c 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -3599,6 +3599,45 @@ static noinline void __init check_state_handling(struct maple_tree *mt) mas_unlock(&mas); } +static noinline void __init alloc_cyclic_testing(struct maple_tree *mt) +{ + unsigned long location; + unsigned long next; + int ret = 0; + MA_STATE(mas, mt, 0, 0); + + next = 0; + mtree_lock(mt); + for (int i = 0; i < 100; i++) { + mas_alloc_cyclic(&mas, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MAS_BUG_ON(&mas, i != location - 2); + MAS_BUG_ON(&mas, mas.index != location); + MAS_BUG_ON(&mas, mas.last != location); + MAS_BUG_ON(&mas, i != next - 3); + } + + mtree_unlock(mt); + mtree_destroy(mt); + next = 0; + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (int i = 0; i < 100; i++) { + mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, i != location - 2); + MT_BUG_ON(mt, i != next - 3); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + } + + mtree_destroy(mt); + /* Overflow test */ + next = ULONG_MAX - 1; + ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, ret != 0); + ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, ret != 0); + ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); + MT_BUG_ON(mt, ret != 1); +} + static DEFINE_MTREE(tree); static int __init maple_tree_seed(void) { @@ -3880,6 +3919,11 @@ static int __init maple_tree_seed(void) check_state_handling(&tree); mtree_destroy(&tree); + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + alloc_cyclic_testing(&tree); + mtree_destroy(&tree); + + #if defined(BENCH) skip: #endif |
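
For context, a minimal usage sketch of the mtree_alloc_cyclic() interface added to lib/maple_tree.c above (and exercised by alloc_cyclic_testing() at the end of the diff). The struct session, session_ids, session_next names and the [1, 4095] range are illustrative assumptions, not part of the patch:

#include <linux/gfp.h>
#include <linux/maple_tree.h>

static struct maple_tree session_ids;	/* ID -> struct session lookup */
static unsigned long session_next;	/* cyclic allocation cursor */

struct session {
	unsigned long id;
	/* ... */
};

static void session_ids_init(void)
{
	/* mtree_alloc_cyclic() requires an MT_FLAGS_ALLOC_RANGE tree. */
	mt_init_flags(&session_ids, MT_FLAGS_ALLOC_RANGE);
}

static int session_register(struct session *s, gfp_t gfp)
{
	int ret;

	/* Store @s at the first free ID in [1, 4095] at or after session_next. */
	ret = mtree_alloc_cyclic(&session_ids, &s->id, s, 1, 4095,
				 &session_next, gfp);
	if (ret < 0)		/* -ENOMEM, -EINVAL or -EBUSY */
		return ret;

	/* ret == 1 only means the search wrapped back to the low end. */
	return 0;
}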
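
The NLA_BE16/NLA_BE32 entries added to the nla_attr_len[] and nla_attr_minlen[] tables in lib/nlattr.c above let policies validate fixed-size big-endian attributes. A hypothetical policy using them; the MYDRV_ATTR_* names are illustrative only:

#include <net/netlink.h>

enum {
	MYDRV_ATTR_UNSPEC,
	MYDRV_ATTR_PORT,	/* __be16, network byte order */
	MYDRV_ATTR_ADDR,	/* __be32, network byte order */
	__MYDRV_ATTR_MAX,
};
#define MYDRV_ATTR_MAX (__MYDRV_ATTR_MAX - 1)

static const struct nla_policy mydrv_policy[MYDRV_ATTR_MAX + 1] = {
	[MYDRV_ATTR_PORT] = { .type = NLA_BE16 },
	[MYDRV_ATTR_ADDR] = { .type = NLA_BE32 },
};

/* After parsing against the policy, values are read in big-endian form: */
static __be16 mydrv_get_port(struct nlattr **attrs)
{
	return nla_get_be16(attrs[MYDRV_ATTR_PORT]);
}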
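
The kerneldoc cleanups in lib/seq_buf.c above describe one calling convention throughout (zero on success, -1 on overflow). A small, self-contained sketch of that API; the buffer size and messages are made up for illustration:

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void dump_state(int state)
{
	char buf[128];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));

	/* Each writer returns 0 on success, -1 once the buffer overflows. */
	seq_buf_printf(&s, "state: %d\n", state);
	seq_buf_puts(&s, "detail: example line\n");

	/* Emits the NUL-terminated buffer line by line via printk(). */
	seq_buf_do_printk(&s, KERN_INFO);
}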
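
The lib/stackdepot.c rework above replaces the pool rwlock with RCU-protected hash buckets plus a raw spinlock, and splits records into refcounted ones (saved with STACK_DEPOT_FLAG_GET, evictable via stack_depot_put()) and persistent ones (saved without it, which must never be put). A caller-side sketch of the refcounted model, assuming the usual stack_trace_save()/stack_depot_save_flags() entry points; the function names and the 16-entry buffer are illustrative:

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_alloc_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, 16, 1 /* skip this frame */);

	/*
	 * FLAG_GET takes a reference on the (possibly pre-existing) record;
	 * a zero handle means the trace could not be stored.
	 */
	return stack_depot_save_flags(entries, nr, gfp,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void release_alloc_stack(depot_stack_handle_t handle)
{
	if (!handle)
		return;

	/*
	 * Drops the reference taken above; once the refcount hits zero the
	 * record is unhashed and parked on the freelist until an RCU grace
	 * period has passed, as described in depot_free_stack() above.
	 */
	stack_depot_put(handle);
}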