Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                  |   4
-rw-r--r--  lib/Kconfig.debug            |  14
-rw-r--r--  lib/Makefile                 |   5
-rw-r--r--  lib/btree.c                  |   1
-rw-r--r--  lib/cpu_rmap.c               |  57
-rw-r--r--  lib/crypto/blake2s-generic.c |   5
-rw-r--r--  lib/crypto/blake2s.c         |   1
-rw-r--r--  lib/crypto/utils.c           |   2
-rw-r--r--  lib/debugobjects.c           | 125
-rw-r--r--  lib/dynamic_debug.c          |  51
-rw-r--r--  lib/fault-inject.c           | 191
-rw-r--r--  lib/iov_iter.c               | 124
-rw-r--r--  lib/kobject.c                |  34
-rw-r--r--  lib/kunit/debugfs.c          |  14
-rw-r--r--  lib/kunit/kunit-test.c       |  77
-rw-r--r--  lib/kunit/test.c             |  57
-rw-r--r--  lib/libcrc32c.c              |   6
-rw-r--r--  lib/list-test.c              | 300
-rw-r--r--  lib/maple_tree.c             |  66
-rw-r--r--  lib/packing.c                |   1
-rw-r--r--  lib/pldmfw/pldmfw.c          |   1
-rw-r--r--  lib/rcuref.c                 | 281
-rw-r--r--  lib/test_fprobe.c            |   1
-rw-r--r--  lib/test_vmalloc.c           |   2
-rw-r--r--  lib/vdso/Makefile            |  13
-rw-r--r--  lib/vsprintf.c               |   2
26 files changed, 1206 insertions, 229 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index ce2abffb9ed8..5c2da561c516 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -92,6 +92,7 @@ config ARCH_USE_SYM_ANNOTATIONS config INDIRECT_PIO bool "Access I/O in non-MMIO mode" depends on ARM64 + depends on HAS_IOPORT help On some platforms where no separate I/O space exists, there are I/O hosts which can not be accessed in MMIO mode. Using the logical PIO @@ -509,6 +510,9 @@ config HAS_IOMEM depends on !NO_IOMEM default y +config HAS_IOPORT + bool + config HAS_IOPORT_MAP bool depends on HAS_IOMEM && !NO_IOPORT_MAP diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 39d1d93164bd..786ba9fd8555 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1958,9 +1958,21 @@ config FAIL_SUNRPC Provide fault-injection capability for SunRPC and its consumers. +config FAULT_INJECTION_CONFIGFS + bool "Configfs interface for fault-injection capabilities" + depends on FAULT_INJECTION + select CONFIGFS_FS + help + This option allows configfs-based drivers to dynamically configure + fault-injection via configfs. Each parameter for driver-specific + fault-injection can be made visible as a configfs attribute in a + configfs group. + + config FAULT_INJECTION_STACKTRACE_FILTER bool "stacktrace filter for fault-injection capabilities" - depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT + depends on FAULT_INJECTION + depends on (FAULT_INJECTION_DEBUG_FS || FAULT_INJECTION_CONFIGFS) && STACKTRACE_SUPPORT select STACKTRACE depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 help diff --git a/lib/Makefile b/lib/Makefile index baf2821f7a00..876fcdeae34e 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -47,7 +47,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ list_sort.o uuid.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o base64.o \ - once.o refcount.o usercopy.o errseq.o bucket_locks.o \ + once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ generic-radix-tree.o obj-$(CONFIG_STRING_SELFTEST) += test_string.o obj-y += string_helpers.o @@ -231,6 +231,9 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o +#ensure exported functions have prototypes +CFLAGS_dynamic_debug.o := -DDYNAMIC_DEBUG_MODULE + obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o obj-$(CONFIG_NLATTR) += nlattr.o diff --git a/lib/btree.c b/lib/btree.c index a82100c73b55..49420cae3a83 100644 --- a/lib/btree.c +++ b/lib/btree.c @@ -794,4 +794,3 @@ module_exit(btree_module_exit); MODULE_AUTHOR("Joern Engel <joern@logfs.org>"); MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); -MODULE_LICENSE("GPL"); diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index f08d9c56f712..73c1636b927b 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -128,19 +128,31 @@ debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) } #endif +static int get_free_index(struct cpu_rmap *rmap) +{ + int i; + + for (i = 0; i < rmap->size; i++) + if (!rmap->obj[i]) + return i; + + return -ENOSPC; +} + /** * cpu_rmap_add - add object to a rmap * @rmap: CPU rmap allocated with alloc_cpu_rmap() * @obj: Object to add to rmap * - * Return index of object. 
+ * Return index of object or -ENOSPC if no free entry was found */ int cpu_rmap_add(struct cpu_rmap *rmap, void *obj) { - u16 index; + int index = get_free_index(rmap); + + if (index < 0) + return index; - BUG_ON(rmap->used >= rmap->size); - index = rmap->used++; rmap->obj[index] = obj; return index; } @@ -230,9 +242,10 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap) if (!rmap) return; - for (index = 0; index < rmap->used; index++) { + for (index = 0; index < rmap->size; index++) { glue = rmap->obj[index]; - irq_set_affinity_notifier(glue->notify.irq, NULL); + if (glue) + irq_set_affinity_notifier(glue->notify.irq, NULL); } cpu_rmap_put(rmap); @@ -268,10 +281,22 @@ static void irq_cpu_rmap_release(struct kref *ref) container_of(ref, struct irq_glue, notify.kref); cpu_rmap_put(glue->rmap); + glue->rmap->obj[glue->index] = NULL; kfree(glue); } /** + * irq_cpu_rmap_remove - remove an IRQ from a CPU affinity reverse-map + * @rmap: The reverse-map + * @irq: The IRQ number + */ +int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq) +{ + return irq_set_affinity_notifier(irq, NULL); +} +EXPORT_SYMBOL(irq_cpu_rmap_remove); + +/** * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map * @rmap: The reverse-map * @irq: The IRQ number @@ -293,12 +318,22 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq) glue->notify.release = irq_cpu_rmap_release; glue->rmap = rmap; cpu_rmap_get(rmap); - glue->index = cpu_rmap_add(rmap, glue); + rc = cpu_rmap_add(rmap, glue); + if (rc < 0) + goto err_add; + + glue->index = rc; rc = irq_set_affinity_notifier(irq, &glue->notify); - if (rc) { - cpu_rmap_put(glue->rmap); - kfree(glue); - } + if (rc) + goto err_set; + + return rc; + +err_set: + rmap->obj[glue->index] = NULL; +err_add: + cpu_rmap_put(glue->rmap); + kfree(glue); return rc; } EXPORT_SYMBOL(irq_cpu_rmap_add); diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c index 75ccb3e633e6..3b6dcfdd9628 100644 --- a/lib/crypto/blake2s-generic.c +++ b/lib/crypto/blake2s-generic.c @@ -12,7 +12,6 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/bug.h> #include <asm/unaligned.h> @@ -109,7 +108,3 @@ void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, } EXPORT_SYMBOL(blake2s_compress_generic); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("BLAKE2s hash function"); -MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 98e688c6d891..71a316552cc5 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -67,6 +67,5 @@ static int __init blake2s_mod_init(void) } module_init(blake2s_mod_init); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("BLAKE2s hash function"); MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c index 53230ab1b195..c852c7151b0a 100644 --- a/lib/crypto/utils.c +++ b/lib/crypto/utils.c @@ -6,7 +6,7 @@ */ #include <asm/unaligned.h> -#include <crypto/algapi.h> +#include <crypto/utils.h> #include <linux/module.h> /* diff --git a/lib/debugobjects.c b/lib/debugobjects.c index df86e649d8be..b796799fadb2 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -216,10 +216,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list) return obj; } -/* - * Allocate a new object. If the pool is empty, switch off the debugger. - * Must be called with interrupts disabled. 
- */ static struct debug_obj * alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr) { @@ -552,11 +548,49 @@ static void debug_object_is_on_stack(void *addr, int onstack) WARN_ON(1); } +static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b, + const struct debug_obj_descr *descr, + bool onstack, bool alloc_ifstatic) +{ + struct debug_obj *obj = lookup_object(addr, b); + enum debug_obj_state state = ODEBUG_STATE_NONE; + + if (likely(obj)) + return obj; + + /* + * debug_object_init() unconditionally allocates untracked + * objects. It does not matter whether it is a static object or + * not. + * + * debug_object_assert_init() and debug_object_activate() allow + * allocation only if the descriptor callback confirms that the + * object is static and considered initialized. For non-static + * objects the allocation needs to be done from the fixup callback. + */ + if (unlikely(alloc_ifstatic)) { + if (!descr->is_static_object || !descr->is_static_object(addr)) + return ERR_PTR(-ENOENT); + /* Statically allocated objects are considered initialized */ + state = ODEBUG_STATE_INIT; + } + + obj = alloc_object(addr, b, descr); + if (likely(obj)) { + obj->state = state; + debug_object_is_on_stack(addr, onstack); + return obj; + } + + /* Out of memory. Do the cleanup outside of the locked region */ + debug_objects_enabled = 0; + return NULL; +} + static void __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) { enum debug_obj_state state; - bool check_stack = false; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; @@ -572,16 +606,11 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack raw_spin_lock_irqsave(&db->lock, flags); - obj = lookup_object(addr, db); - if (!obj) { - obj = alloc_object(addr, db, descr); - if (!obj) { - debug_objects_enabled = 0; - raw_spin_unlock_irqrestore(&db->lock, flags); - debug_objects_oom(); - return; - } - check_stack = true; + obj = lookup_object_or_alloc(addr, db, descr, onstack, false); + if (unlikely(!obj)) { + raw_spin_unlock_irqrestore(&db->lock, flags); + debug_objects_oom(); + return; } switch (obj->state) { @@ -607,8 +636,6 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack } raw_spin_unlock_irqrestore(&db->lock, flags); - if (check_stack) - debug_object_is_on_stack(addr, onstack); } /** @@ -648,14 +675,12 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack); */ int debug_object_activate(void *addr, const struct debug_obj_descr *descr) { + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; enum debug_obj_state state; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; int ret; - struct debug_obj o = { .object = addr, - .state = ODEBUG_STATE_NOTAVAILABLE, - .descr = descr }; if (!debug_objects_enabled) return 0; @@ -664,8 +689,8 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr) raw_spin_lock_irqsave(&db->lock, flags); - obj = lookup_object(addr, db); - if (obj) { + obj = lookup_object_or_alloc(addr, db, descr, false, true); + if (likely(!IS_ERR_OR_NULL(obj))) { bool print_object = false; switch (obj->state) { @@ -698,24 +723,16 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr) raw_spin_unlock_irqrestore(&db->lock, flags); - /* - * We are here when a static object is activated. We - * let the type specific code confirm whether this is - * true or not. 
if true, we just make sure that the - * static object is tracked in the object tracker. If - * not, this must be a bug, so we try to fix it up. - */ - if (descr->is_static_object && descr->is_static_object(addr)) { - /* track this static object */ - debug_object_init(addr, descr); - debug_object_activate(addr, descr); - } else { - debug_print_object(&o, "activate"); - ret = debug_object_fixup(descr->fixup_activate, addr, - ODEBUG_STATE_NOTAVAILABLE); - return ret ? 0 : -EINVAL; + /* If NULL the allocation has hit OOM */ + if (!obj) { + debug_objects_oom(); + return 0; } - return 0; + + /* Object is neither static nor tracked. It's not initialized */ + debug_print_object(&o, "activate"); + ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE); + return ret ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(debug_object_activate); @@ -869,6 +886,7 @@ EXPORT_SYMBOL_GPL(debug_object_free); */ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { + struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; @@ -879,31 +897,20 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); + obj = lookup_object_or_alloc(addr, db, descr, false, true); + raw_spin_unlock_irqrestore(&db->lock, flags); + if (likely(!IS_ERR_OR_NULL(obj))) + return; - obj = lookup_object(addr, db); + /* If NULL the allocation has hit OOM */ if (!obj) { - struct debug_obj o = { .object = addr, - .state = ODEBUG_STATE_NOTAVAILABLE, - .descr = descr }; - - raw_spin_unlock_irqrestore(&db->lock, flags); - /* - * Maybe the object is static, and we let the type specific - * code confirm. Track this static object if true, else invoke - * fixup. - */ - if (descr->is_static_object && descr->is_static_object(addr)) { - /* Track this static object */ - debug_object_init(addr, descr); - } else { - debug_print_object(&o, "assert_init"); - debug_object_fixup(descr->fixup_assert_init, addr, - ODEBUG_STATE_NOTAVAILABLE); - } + debug_objects_oom(); return; } - raw_spin_unlock_irqrestore(&db->lock, flags); + /* Object is neither tracked nor static. It's not initialized. */ + debug_print_object(&o, "assert_init"); + debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE); } EXPORT_SYMBOL_GPL(debug_object_assert_init); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 009f2ead09c1..fdd6d9800a70 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -1223,8 +1223,7 @@ static void ddebug_attach_module_classes(struct ddebug_table *dt, * Allocate a new ddebug_table for the given module * and add it to the global list. */ -static int __ddebug_add_module(struct _ddebug_info *di, unsigned int base, - const char *modname) +static int ddebug_add_module(struct _ddebug_info *di, const char *modname) { struct ddebug_table *dt; @@ -1263,11 +1262,6 @@ static int __ddebug_add_module(struct _ddebug_info *di, unsigned int base, return 0; } -int ddebug_add_module(struct _ddebug_info *di, const char *modname) -{ - return __ddebug_add_module(di, 0, modname); -} - /* helper for ddebug_dyndbg_(boot|module)_param_cb */ static int ddebug_dyndbg_param_cb(char *param, char *val, const char *modname, int on_err) @@ -1314,11 +1308,13 @@ static void ddebug_table_free(struct ddebug_table *dt) kfree(dt); } +#ifdef CONFIG_MODULES + /* * Called in response to a module being unloaded. 
Removes * any ddebug_table's which point at the module. */ -int ddebug_remove_module(const char *mod_name) +static int ddebug_remove_module(const char *mod_name) { struct ddebug_table *dt, *nextdt; int ret = -ENOENT; @@ -1337,6 +1333,33 @@ int ddebug_remove_module(const char *mod_name) return ret; } +static int ddebug_module_notify(struct notifier_block *self, unsigned long val, + void *data) +{ + struct module *mod = data; + int ret = 0; + + switch (val) { + case MODULE_STATE_COMING: + ret = ddebug_add_module(&mod->dyndbg_info, mod->name); + if (ret) + WARN(1, "Failed to allocate memory: dyndbg may not work properly.\n"); + break; + case MODULE_STATE_GOING: + ddebug_remove_module(mod->name); + break; + } + + return notifier_from_errno(ret); +} + +static struct notifier_block ddebug_module_nb = { + .notifier_call = ddebug_module_notify, + .priority = 0, /* dynamic debug depends on jump label */ +}; + +#endif /* CONFIG_MODULES */ + static void ddebug_remove_all_tables(void) { mutex_lock(&ddebug_lock); @@ -1388,6 +1411,14 @@ static int __init dynamic_debug_init(void) .num_classes = __stop___dyndbg_classes - __start___dyndbg_classes, }; +#ifdef CONFIG_MODULES + ret = register_module_notifier(&ddebug_module_nb); + if (ret) { + pr_warn("Failed to register dynamic debug module notifier\n"); + return ret; + } +#endif /* CONFIG_MODULES */ + if (&__start___dyndbg == &__stop___dyndbg) { if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) { pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n"); @@ -1408,7 +1439,7 @@ static int __init dynamic_debug_init(void) mod_ct++; di.num_descs = mod_sites; di.descs = iter_mod_start; - ret = __ddebug_add_module(&di, i - mod_sites, modname); + ret = ddebug_add_module(&di, modname); if (ret) goto out_err; @@ -1419,7 +1450,7 @@ static int __init dynamic_debug_init(void) } di.num_descs = mod_sites; di.descs = iter_mod_start; - ret = __ddebug_add_module(&di, i - mod_sites, modname); + ret = ddebug_add_module(&di, modname); if (ret) goto out_err; diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 6cff320c4eb4..d608f9b48c10 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -244,3 +244,194 @@ struct dentry *fault_create_debugfs_attr(const char *name, EXPORT_SYMBOL_GPL(fault_create_debugfs_attr); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +#ifdef CONFIG_FAULT_INJECTION_CONFIGFS + +/* These configfs attribute utilities are copied from drivers/block/null_blk/main.c */ + +static ssize_t fault_uint_attr_show(unsigned int val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", val); +} + +static ssize_t fault_ulong_attr_show(unsigned long val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", val); +} + +static ssize_t fault_bool_attr_show(bool val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", val); +} + +static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val)); +} + +static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count) +{ + unsigned int tmp; + int result; + + result = kstrtouint(page, 0, &tmp); + if (result < 0) + return result; + + *val = tmp; + return count; +} + +static ssize_t fault_ulong_attr_store(unsigned long *val, const char *page, size_t count) +{ + int result; + unsigned long tmp; + + result = kstrtoul(page, 0, &tmp); + if (result < 0) + return result; + + *val = tmp; + return count; +} + +static ssize_t fault_bool_attr_store(bool *val, const char *page, size_t count) +{ + bool tmp; + int result; + + result = 
kstrtobool(page, &tmp); + if (result < 0) + return result; + + *val = tmp; + return count; +} + +static ssize_t fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count) +{ + int tmp; + int result; + + result = kstrtoint(page, 0, &tmp); + if (result < 0) + return result; + + atomic_set(val, tmp); + return count; +} + +#define CONFIGFS_ATTR_NAMED(_pfx, _name, _attr_name) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .ca_name = _attr_name, \ + .ca_mode = 0644, \ + .ca_owner = THIS_MODULE, \ + .show = _pfx##_name##_show, \ + .store = _pfx##_name##_store, \ +} + +static struct fault_config *to_fault_config(struct config_item *item) +{ + return container_of(to_config_group(item), struct fault_config, group); +} + +#define FAULT_CONFIGFS_ATTR_NAMED(NAME, ATTR_NAME, MEMBER, TYPE) \ +static ssize_t fault_##NAME##_show(struct config_item *item, char *page) \ +{ \ + return fault_##TYPE##_attr_show(to_fault_config(item)->attr.MEMBER, page); \ +} \ +static ssize_t fault_##NAME##_store(struct config_item *item, const char *page, size_t count) \ +{ \ + struct fault_config *config = to_fault_config(item); \ + return fault_##TYPE##_attr_store(&config->attr.MEMBER, page, count); \ +} \ +CONFIGFS_ATTR_NAMED(fault_, NAME, ATTR_NAME) + +#define FAULT_CONFIGFS_ATTR(NAME, TYPE) \ + FAULT_CONFIGFS_ATTR_NAMED(NAME, __stringify(NAME), NAME, TYPE) + +FAULT_CONFIGFS_ATTR(probability, ulong); +FAULT_CONFIGFS_ATTR(interval, ulong); +FAULT_CONFIGFS_ATTR(times, atomic_t); +FAULT_CONFIGFS_ATTR(space, atomic_t); +FAULT_CONFIGFS_ATTR(verbose, ulong); +FAULT_CONFIGFS_ATTR_NAMED(ratelimit_interval, "verbose_ratelimit_interval_ms", + ratelimit_state.interval, uint); +FAULT_CONFIGFS_ATTR_NAMED(ratelimit_burst, "verbose_ratelimit_burst", + ratelimit_state.burst, uint); +FAULT_CONFIGFS_ATTR_NAMED(task_filter, "task-filter", task_filter, bool); + +#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER + +static ssize_t fault_stacktrace_depth_show(struct config_item *item, char *page) +{ + return fault_ulong_attr_show(to_fault_config(item)->attr.stacktrace_depth, page); +} + +static ssize_t fault_stacktrace_depth_store(struct config_item *item, const char *page, + size_t count) +{ + int result; + unsigned long tmp; + + result = kstrtoul(page, 0, &tmp); + if (result < 0) + return result; + + to_fault_config(item)->attr.stacktrace_depth = + min_t(unsigned long, tmp, MAX_STACK_TRACE_DEPTH); + + return count; +} + +CONFIGFS_ATTR_NAMED(fault_, stacktrace_depth, "stacktrace-depth"); + +static ssize_t fault_xul_attr_show(unsigned long val, char *page) +{ + return snprintf(page, PAGE_SIZE, + sizeof(val) == sizeof(u32) ? 
"0x%08lx\n" : "0x%016lx\n", val); +} + +static ssize_t fault_xul_attr_store(unsigned long *val, const char *page, size_t count) +{ + return fault_ulong_attr_store(val, page, count); +} + +FAULT_CONFIGFS_ATTR_NAMED(require_start, "require-start", require_start, xul); +FAULT_CONFIGFS_ATTR_NAMED(require_end, "require-end", require_end, xul); +FAULT_CONFIGFS_ATTR_NAMED(reject_start, "reject-start", reject_start, xul); +FAULT_CONFIGFS_ATTR_NAMED(reject_end, "reject-end", reject_end, xul); + +#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ + +static struct configfs_attribute *fault_config_attrs[] = { + &fault_attr_probability, + &fault_attr_interval, + &fault_attr_times, + &fault_attr_space, + &fault_attr_verbose, + &fault_attr_ratelimit_interval, + &fault_attr_ratelimit_burst, + &fault_attr_task_filter, +#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER + &fault_attr_stacktrace_depth, + &fault_attr_require_start, + &fault_attr_require_end, + &fault_attr_reject_start, + &fault_attr_reject_end, +#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ + NULL, +}; + +static const struct config_item_type fault_config_type = { + .ct_attrs = fault_config_attrs, + .ct_owner = THIS_MODULE, +}; + +void fault_config_init(struct fault_config *config, const char *name) +{ + config_group_init_type_name(&config->group, name, &fault_config_type); +} +EXPORT_SYMBOL_GPL(fault_config_init); + +#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */ diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 274014e4eafe..967fba189c5f 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -126,13 +126,13 @@ __out: \ iterate_buf(i, n, base, len, off, \ i->ubuf, (I)) \ } else if (likely(iter_is_iovec(i))) { \ - const struct iovec *iov = i->iov; \ + const struct iovec *iov = iter_iov(i); \ void __user *base; \ size_t len; \ iterate_iovec(i, n, base, len, off, \ iov, (I)) \ - i->nr_segs -= iov - i->iov; \ - i->iov = iov; \ + i->nr_segs -= iov - iter_iov(i); \ + i->__iov = iov; \ } else if (iov_iter_is_bvec(i)) { \ const struct bio_vec *bvec = i->bvec; \ void *base; \ @@ -355,7 +355,7 @@ size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) size_t skip; size -= count; - for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { + for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { size_t len = min(count, p->iov_len - skip); size_t ret; @@ -398,7 +398,7 @@ size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) size_t skip; size -= count; - for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { + for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { size_t len = min(count, p->iov_len - skip); size_t ret; @@ -425,7 +425,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, .nofault = false, .user_backed = true, .data_source = direction, - .iov = iov, + .__iov = iov, .nr_segs = nr_segs, .iov_offset = 0, .count = count @@ -876,14 +876,14 @@ static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) i->count -= size; size += i->iov_offset; // from beginning of current segment - for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { + for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { if (likely(size < iov->iov_len)) break; size -= iov->iov_len; } i->iov_offset = size; - i->nr_segs -= iov - i->iov; - i->iov = iov; + i->nr_segs -= iov - iter_iov(i); + i->__iov = iov; } void iov_iter_advance(struct iov_iter *i, size_t size) @@ -958,12 +958,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) unroll -= n; } } else 
{ /* same logics for iovec and kvec */ - const struct iovec *iov = i->iov; + const struct iovec *iov = iter_iov(i); while (1) { size_t n = (--iov)->iov_len; i->nr_segs++; if (unroll <= n) { - i->iov = iov; + i->__iov = iov; i->iov_offset = n - unroll; return; } @@ -980,7 +980,7 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i) { if (i->nr_segs > 1) { if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) - return min(i->count, i->iov->iov_len - i->iov_offset); + return min(i->count, iter_iov(i)->iov_len - i->iov_offset); if (iov_iter_is_bvec(i)) return min(i->count, i->bvec->bv_len - i->iov_offset); } @@ -1095,13 +1095,14 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, unsigned k; for (k = 0; k < i->nr_segs; k++, skip = 0) { - size_t len = i->iov[k].iov_len - skip; + const struct iovec *iov = iter_iov(i) + k; + size_t len = iov->iov_len - skip; if (len > size) len = size; if (len & len_mask) return false; - if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask) + if ((unsigned long)(iov->iov_base + skip) & addr_mask) return false; size -= len; @@ -1194,9 +1195,10 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) unsigned k; for (k = 0; k < i->nr_segs; k++, skip = 0) { - size_t len = i->iov[k].iov_len - skip; + const struct iovec *iov = iter_iov(i) + k; + size_t len = iov->iov_len - skip; if (len) { - res |= (unsigned long)i->iov[k].iov_base + skip; + res |= (unsigned long)iov->iov_base + skip; if (len > size) len = size; res |= len; @@ -1273,14 +1275,15 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i) return ~0U; for (k = 0; k < i->nr_segs; k++) { - if (i->iov[k].iov_len) { - unsigned long base = (unsigned long)i->iov[k].iov_base; + const struct iovec *iov = iter_iov(i) + k; + if (iov->iov_len) { + unsigned long base = (unsigned long)iov->iov_base; if (v) // if not the first one res |= base | v; // this start | previous end - v = base + i->iov[k].iov_len; - if (size <= i->iov[k].iov_len) + v = base + iov->iov_len; + if (size <= iov->iov_len) break; - size -= i->iov[k].iov_len; + size -= iov->iov_len; } } return res; @@ -1396,13 +1399,14 @@ static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) return (unsigned long)i->ubuf + i->iov_offset; for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { - size_t len = i->iov[k].iov_len - skip; + const struct iovec *iov = iter_iov(i) + k; + size_t len = iov->iov_len - skip; if (unlikely(!len)) continue; if (*size > len) *size = len; - return (unsigned long)i->iov[k].iov_base + skip; + return (unsigned long)iov->iov_base + skip; } BUG(); // if it had been empty, we wouldn't get called } @@ -1614,7 +1618,7 @@ static int iov_npages(const struct iov_iter *i, int maxpages) const struct iovec *p; int npages = 0; - for (p = i->iov; size; skip = 0, p++) { + for (p = iter_iov(i); size; skip = 0, p++) { unsigned offs = offset_in_page(p->iov_base + skip); size_t len = min(p->iov_len - skip, size); @@ -1691,14 +1695,14 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) flags); else if (iov_iter_is_kvec(new) || iter_is_iovec(new)) /* iovec and kvec have identical layout */ - return new->iov = kmemdup(new->iov, + return new->__iov = kmemdup(new->__iov, new->nr_segs * sizeof(struct iovec), flags); return NULL; } EXPORT_SYMBOL(dup_iter); -static int copy_compat_iovec_from_user(struct iovec *iov, +static __noclone int copy_compat_iovec_from_user(struct iovec *iov, const struct iovec __user *uvec, unsigned 
long nr_segs) { const struct compat_iovec __user *uiov = @@ -1731,18 +1735,35 @@ uaccess_end: } static int copy_iovec_from_user(struct iovec *iov, - const struct iovec __user *uvec, unsigned long nr_segs) + const struct iovec __user *uiov, unsigned long nr_segs) { - unsigned long seg; + int ret = -EFAULT; - if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) + if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) return -EFAULT; - for (seg = 0; seg < nr_segs; seg++) { - if ((ssize_t)iov[seg].iov_len < 0) - return -EINVAL; - } - return 0; + do { + void __user *buf; + ssize_t len; + + unsafe_get_user(len, &uiov->iov_len, uaccess_end); + unsafe_get_user(buf, &uiov->iov_base, uaccess_end); + + /* check for size_t not fitting in ssize_t .. */ + if (unlikely(len < 0)) { + ret = -EINVAL; + goto uaccess_end; + } + iov->iov_base = buf; + iov->iov_len = len; + + uiov++; iov++; + } while (--nr_segs); + + ret = 0; +uaccess_end: + user_access_end(); + return ret; } struct iovec *iovec_from_user(const struct iovec __user *uvec, @@ -1767,7 +1788,7 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec, return ERR_PTR(-ENOMEM); } - if (compat) + if (unlikely(compat)) ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); else ret = copy_iovec_from_user(iov, uvec, nr_segs); @@ -1780,6 +1801,30 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec, return iov; } +/* + * Single segment iovec supplied by the user, import it as ITER_UBUF. + */ +static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec, + struct iovec **iovp, struct iov_iter *i, + bool compat) +{ + struct iovec *iov = *iovp; + ssize_t ret; + + if (compat) + ret = copy_compat_iovec_from_user(iov, uvec, 1); + else + ret = copy_iovec_from_user(iov, uvec, 1); + if (unlikely(ret)) + return ret; + + ret = import_ubuf(type, iov->iov_base, iov->iov_len, i); + if (unlikely(ret)) + return ret; + *iovp = NULL; + return i->count; +} + ssize_t __import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i, bool compat) @@ -1788,6 +1833,9 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec, unsigned long seg; struct iovec *iov; + if (nr_segs == 1) + return __import_iovec_ubuf(type, uvec, iovp, i, compat); + iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); if (IS_ERR(iov)) { *iovp = NULL; @@ -1866,9 +1914,7 @@ int import_single_range(int rw, void __user *buf, size_t len, if (unlikely(!access_ok(buf, len))) return -EFAULT; - iov->iov_base = buf; - iov->iov_len = len; - iov_iter_init(i, rw, iov, 1, len); + iov_iter_ubuf(i, rw, buf, len); return 0; } EXPORT_SYMBOL(import_single_range); @@ -1918,7 +1964,7 @@ void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) if (iov_iter_is_bvec(i)) i->bvec -= state->nr_segs - i->nr_segs; else - i->iov -= state->nr_segs - i->nr_segs; + i->__iov -= state->nr_segs - i->nr_segs; i->nr_segs = state->nr_segs; } diff --git a/lib/kobject.c b/lib/kobject.c index 6e2f0bee3560..f79a434e1231 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -10,6 +10,8 @@ * about using the kobject interface. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kobject.h> #include <linux/string.h> #include <linux/export.h> @@ -127,7 +129,7 @@ static int fill_kobj_path(const struct kobject *kobj, char *path, int length) *(path + --length) = '/'; } - pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), + pr_debug("'%s' (%p): %s: path = '%s'\n", kobject_name(kobj), kobj, __func__, path); return 0; @@ -223,7 +225,7 @@ static int kobject_add_internal(struct kobject *kobj) kobj->parent = parent; } - pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", + pr_debug("'%s' (%p): %s: parent: '%s', set: '%s'\n", kobject_name(kobj), kobj, __func__, parent ? kobject_name(parent) : "<NULL>", kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); @@ -338,7 +340,7 @@ void kobject_init(struct kobject *kobj, const struct kobj_type *ktype) /* do not error out as sometimes we can recover */ pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n", kobj); - dump_stack(); + dump_stack_lvl(KERN_ERR); } kobject_init_internal(kobj); @@ -347,7 +349,7 @@ void kobject_init(struct kobject *kobj, const struct kobj_type *ktype) error: pr_err("kobject (%p): %s\n", kobj, err_str); - dump_stack(); + dump_stack_lvl(KERN_ERR); } EXPORT_SYMBOL(kobject_init); @@ -359,7 +361,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj, retval = kobject_set_name_vargs(kobj, fmt, vargs); if (retval) { - pr_err("kobject: can not set name properly!\n"); + pr_err("can not set name properly!\n"); return retval; } kobj->parent = parent; @@ -411,7 +413,7 @@ int kobject_add(struct kobject *kobj, struct kobject *parent, if (!kobj->state_initialized) { pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n", kobject_name(kobj), kobj); - dump_stack(); + dump_stack_lvl(KERN_ERR); return -EINVAL; } va_start(args, fmt); @@ -588,7 +590,7 @@ static void __kobject_del(struct kobject *kobj) /* send "remove" if the caller did not do it but sent "add" */ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) { - pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n", + pr_debug("'%s' (%p): auto cleanup 'remove' event\n", kobject_name(kobj), kobj); kobject_uevent(kobj, KOBJ_REMOVE); } @@ -658,16 +660,16 @@ static void kobject_cleanup(struct kobject *kobj) const struct kobj_type *t = get_ktype(kobj); const char *name = kobj->name; - pr_debug("kobject: '%s' (%p): %s, parent %p\n", + pr_debug("'%s' (%p): %s, parent %p\n", kobject_name(kobj), kobj, __func__, kobj->parent); if (t && !t->release) - pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", + pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. 
See Documentation/core-api/kobject.rst.\n", kobject_name(kobj), kobj); /* remove from sysfs if the caller did not do it */ if (kobj->state_in_sysfs) { - pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n", + pr_debug("'%s' (%p): auto cleanup kobject_del\n", kobject_name(kobj), kobj); __kobject_del(kobj); } else { @@ -676,14 +678,14 @@ static void kobject_cleanup(struct kobject *kobj) } if (t && t->release) { - pr_debug("kobject: '%s' (%p): calling ktype release\n", + pr_debug("'%s' (%p): calling ktype release\n", kobject_name(kobj), kobj); t->release(kobj); } /* free name if we allocated it */ if (name) { - pr_debug("kobject: '%s': free name\n", name); + pr_debug("'%s': free name\n", name); kfree_const(name); } @@ -703,8 +705,8 @@ static void kobject_release(struct kref *kref) struct kobject *kobj = container_of(kref, struct kobject, kref); #ifdef CONFIG_DEBUG_KOBJECT_RELEASE unsigned long delay = HZ + HZ * get_random_u32_below(4); - pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n", - kobject_name(kobj), kobj, __func__, kobj->parent, delay); + pr_info("'%s' (%p): %s, parent %p (delayed %ld)\n", + kobject_name(kobj), kobj, __func__, kobj->parent, delay); INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); schedule_delayed_work(&kobj->release, delay); @@ -733,7 +735,7 @@ EXPORT_SYMBOL(kobject_put); static void dynamic_kobj_release(struct kobject *kobj) { - pr_debug("kobject: (%p): %s\n", kobj, __func__); + pr_debug("(%p): %s\n", kobj, __func__); kfree(kobj); } @@ -910,7 +912,7 @@ EXPORT_SYMBOL_GPL(kset_find_obj); static void kset_release(struct kobject *kobj) { struct kset *kset = container_of(kobj, struct kset, kobj); - pr_debug("kobject: '%s' (%p): %s\n", + pr_debug("'%s' (%p): %s\n", kobject_name(kobj), kobj, __func__); kfree(kset); } diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c index de0ee2e03ed6..b08bb1fba106 100644 --- a/lib/kunit/debugfs.c +++ b/lib/kunit/debugfs.c @@ -55,14 +55,24 @@ static int debugfs_print_results(struct seq_file *seq, void *v) enum kunit_status success = kunit_suite_has_succeeded(suite); struct kunit_case *test_case; - if (!suite || !suite->log) + if (!suite) return 0; - seq_printf(seq, "%s", suite->log); + /* Print KTAP header so the debugfs log can be parsed as valid KTAP. */ + seq_puts(seq, "KTAP version 1\n"); + seq_puts(seq, "1..1\n"); + + /* Print suite header because it is not stored in the test logs. 
*/ + seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n"); + seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name); + seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite)); kunit_suite_for_each_test_case(suite, test_case) debugfs_print_result(seq, suite, test_case); + if (suite->log) + seq_printf(seq, "%s", suite->log); + seq_printf(seq, "%s %d %s\n", kunit_status_to_ok_not_ok(success), 1, suite->name); return 0; diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 4df0335d0d06..42e44caa1bdd 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -6,6 +6,7 @@ * Author: Brendan Higgins <brendanhiggins@google.com> */ #include <kunit/test.h> +#include <kunit/test-bug.h> #include "try-catch-impl.h" @@ -443,18 +444,6 @@ static struct kunit_suite kunit_resource_test_suite = { .test_cases = kunit_resource_test_cases, }; -static void kunit_log_test(struct kunit *test); - -static struct kunit_case kunit_log_test_cases[] = { - KUNIT_CASE(kunit_log_test), - {} -}; - -static struct kunit_suite kunit_log_test_suite = { - .name = "kunit-log-test", - .test_cases = kunit_log_test_cases, -}; - static void kunit_log_test(struct kunit *test) { struct kunit_suite suite; @@ -481,6 +470,29 @@ static void kunit_log_test(struct kunit *test) #endif } +static void kunit_log_newline_test(struct kunit *test) +{ + kunit_info(test, "Add newline\n"); + if (test->log) { + KUNIT_ASSERT_NOT_NULL_MSG(test, strstr(test->log, "Add newline\n"), + "Missing log line, full log:\n%s", test->log); + KUNIT_EXPECT_NULL(test, strstr(test->log, "Add newline\n\n")); + } else { + kunit_skip(test, "only useful when debugfs is enabled"); + } +} + +static struct kunit_case kunit_log_test_cases[] = { + KUNIT_CASE(kunit_log_test), + KUNIT_CASE(kunit_log_newline_test), + {} +}; + +static struct kunit_suite kunit_log_test_suite = { + .name = "kunit-log-test", + .test_cases = kunit_log_test_cases, +}; + static void kunit_status_set_failure_test(struct kunit *test) { struct kunit fake; @@ -521,7 +533,46 @@ static struct kunit_suite kunit_status_test_suite = { .test_cases = kunit_status_test_cases, }; +static void kunit_current_test(struct kunit *test) +{ + /* Check results of both current->kunit_test and + * kunit_get_current_test() are equivalent to current test. + */ + KUNIT_EXPECT_PTR_EQ(test, test, current->kunit_test); + KUNIT_EXPECT_PTR_EQ(test, test, kunit_get_current_test()); +} + +static void kunit_current_fail_test(struct kunit *test) +{ + struct kunit fake; + + kunit_init_test(&fake, "fake test", NULL); + KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS); + + /* Set current->kunit_test to fake test. */ + current->kunit_test = &fake; + + kunit_fail_current_test("This should make `fake` test fail."); + KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE); + kunit_cleanup(&fake); + + /* Reset current->kunit_test to current test. 
*/ + current->kunit_test = test; +} + +static struct kunit_case kunit_current_test_cases[] = { + KUNIT_CASE(kunit_current_test), + KUNIT_CASE(kunit_current_fail_test), + {} +}; + +static struct kunit_suite kunit_current_test_suite = { + .name = "kunit_current", + .test_cases = kunit_current_test_cases, +}; + kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite, - &kunit_log_test_suite, &kunit_status_test_suite); + &kunit_log_test_suite, &kunit_status_test_suite, + &kunit_current_test_suite); MODULE_LICENSE("GPL v2"); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index c9e15bb60058..e2910b261112 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -108,28 +108,51 @@ static void kunit_print_test_stats(struct kunit *test, stats.total); } +/** + * kunit_log_newline() - Add newline to the end of log if one is not + * already present. + * @log: The log to add the newline to. + */ +static void kunit_log_newline(char *log) +{ + int log_len, len_left; + + log_len = strlen(log); + len_left = KUNIT_LOG_SIZE - log_len - 1; + + if (log_len > 0 && log[log_len - 1] != '\n') + strncat(log, "\n", len_left); +} + /* * Append formatted message to log, size of which is limited to * KUNIT_LOG_SIZE bytes (including null terminating byte). */ void kunit_log_append(char *log, const char *fmt, ...) { - char line[KUNIT_LOG_SIZE]; va_list args; - int len_left; + int len, log_len, len_left; if (!log) return; - len_left = KUNIT_LOG_SIZE - strlen(log) - 1; + log_len = strlen(log); + len_left = KUNIT_LOG_SIZE - log_len - 1; if (len_left <= 0) return; + /* Evaluate length of line to add to log */ + va_start(args, fmt); + len = vsnprintf(NULL, 0, fmt, args) + 1; + va_end(args); + + /* Print formatted line to the log */ va_start(args, fmt); - vsnprintf(line, sizeof(line), fmt, args); + vsnprintf(log + log_len, min(len, len_left), fmt, args); va_end(args); - strncat(log, line, len_left); + /* Add newline to end of log if not already present. */ + kunit_log_newline(log); } EXPORT_SYMBOL_GPL(kunit_log_append); @@ -147,10 +170,18 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases); static void kunit_print_suite_start(struct kunit_suite *suite) { - kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "KTAP version 1\n"); - kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s", + /* + * We do not log the test suite header as doing so would + * mean debugfs display would consist of the test suite + * header prior to individual test results. + * Hence directly printk the suite status, and we will + * separately seq_printf() the suite header for the debugfs + * representation. + */ + pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n"); + pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name); - kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd", + pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite)); } @@ -167,10 +198,9 @@ static void kunit_print_ok_not_ok(void *test_or_suite, /* * We do not log the test suite results as doing so would - * mean debugfs display would consist of the test suite - * description and status prior to individual test results. - * Hence directly printk the suite status, and we will - * separately seq_printf() the suite status for the debugfs + * mean debugfs display would consist of an incorrect test + * number. Hence directly printk the suite result, and we will + * separately seq_printf() the suite results for the debugfs * representation. 
*/ if (suite) @@ -437,7 +467,6 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite, struct kunit_try_catch_context context; struct kunit_try_catch *try_catch; - kunit_init_test(test, test_case->name, test_case->log); try_catch = &test->try_catch; kunit_try_catch_init(try_catch, @@ -533,6 +562,8 @@ int kunit_run_tests(struct kunit_suite *suite) struct kunit_result_stats param_stats = { 0 }; test_case->status = KUNIT_SKIPPED; + kunit_init_test(&test, test_case->name, test_case->log); + if (!test_case->generate_params) { /* Non-parameterised test. */ kunit_run_case_catch_errors(suite, test_case, &test); diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 5ca0d815a95d..649e687413a0 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c @@ -65,12 +65,6 @@ static void __exit libcrc32c_mod_fini(void) crypto_free_shash(tfm); } -const char *crc32c_impl(void) -{ - return crypto_shash_driver_name(tfm); -} -EXPORT_SYMBOL(crc32c_impl); - module_init(libcrc32c_mod_init); module_exit(libcrc32c_mod_fini); diff --git a/lib/list-test.c b/lib/list-test.c index d374cf5d1a57..0cc27de9cec8 100644 --- a/lib/list-test.c +++ b/lib/list-test.c @@ -8,6 +8,7 @@ #include <kunit/test.h> #include <linux/list.h> +#include <linux/klist.h> struct list_test_struct { int data; @@ -1199,6 +1200,303 @@ static struct kunit_suite hlist_test_module = { .test_cases = hlist_test_cases, }; -kunit_test_suites(&list_test_module, &hlist_test_module); + +struct klist_test_struct { + int data; + struct klist klist; + struct klist_node klist_node; +}; + +static int node_count; +static struct klist_node *last_node; + +static void check_node(struct klist_node *node_ptr) +{ + node_count++; + last_node = node_ptr; +} + +static void check_delete_node(struct klist_node *node_ptr) +{ + node_count--; + last_node = node_ptr; +} + +static void klist_test_add_tail(struct kunit *test) +{ + struct klist_node a, b; + struct klist mylist; + struct klist_iter i; + + node_count = 0; + klist_init(&mylist, &check_node, NULL); + + klist_add_tail(&a, &mylist); + KUNIT_EXPECT_EQ(test, node_count, 1); + KUNIT_EXPECT_PTR_EQ(test, last_node, &a); + + klist_add_tail(&b, &mylist); + KUNIT_EXPECT_EQ(test, node_count, 2); + KUNIT_EXPECT_PTR_EQ(test, last_node, &b); + + /* should be [list] -> a -> b */ + klist_iter_init(&mylist, &i); + + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b); + KUNIT_EXPECT_NULL(test, klist_next(&i)); + + klist_iter_exit(&i); + +} + +static void klist_test_add_head(struct kunit *test) +{ + struct klist_node a, b; + struct klist mylist; + struct klist_iter i; + + node_count = 0; + klist_init(&mylist, &check_node, NULL); + + klist_add_head(&a, &mylist); + KUNIT_EXPECT_EQ(test, node_count, 1); + KUNIT_EXPECT_PTR_EQ(test, last_node, &a); + + klist_add_head(&b, &mylist); + KUNIT_EXPECT_EQ(test, node_count, 2); + KUNIT_EXPECT_PTR_EQ(test, last_node, &b); + + /* should be [list] -> b -> a */ + klist_iter_init(&mylist, &i); + + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a); + KUNIT_EXPECT_NULL(test, klist_next(&i)); + + klist_iter_exit(&i); + +} + +static void klist_test_add_behind(struct kunit *test) +{ + struct klist_node a, b, c, d; + struct klist mylist; + struct klist_iter i; + + node_count = 0; + klist_init(&mylist, &check_node, NULL); + + klist_add_head(&a, &mylist); + klist_add_head(&b, &mylist); + + klist_add_behind(&c, &a); + KUNIT_EXPECT_EQ(test, node_count, 3); + KUNIT_EXPECT_PTR_EQ(test, last_node, &c); + + klist_add_behind(&d, 
&b); + KUNIT_EXPECT_EQ(test, node_count, 4); + KUNIT_EXPECT_PTR_EQ(test, last_node, &d); + + klist_iter_init(&mylist, &i); + + /* should be [list] -> b -> d -> a -> c*/ + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c); + KUNIT_EXPECT_NULL(test, klist_next(&i)); + + klist_iter_exit(&i); + +} + +static void klist_test_add_before(struct kunit *test) +{ + struct klist_node a, b, c, d; + struct klist mylist; + struct klist_iter i; + + node_count = 0; + klist_init(&mylist, &check_node, NULL); + + klist_add_head(&a, &mylist); + klist_add_head(&b, &mylist); + klist_add_before(&c, &a); + KUNIT_EXPECT_EQ(test, node_count, 3); + KUNIT_EXPECT_PTR_EQ(test, last_node, &c); + + klist_add_before(&d, &b); + KUNIT_EXPECT_EQ(test, node_count, 4); + KUNIT_EXPECT_PTR_EQ(test, last_node, &d); + + klist_iter_init(&mylist, &i); + + /* should be [list] -> b -> d -> a -> c*/ + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a); + KUNIT_EXPECT_NULL(test, klist_next(&i)); + + klist_iter_exit(&i); + +} + +/* + * Verify that klist_del() delays the deletion of a node until there + * are no other references to it + */ +static void klist_test_del_refcount_greater_than_zero(struct kunit *test) +{ + struct klist_node a, b, c, d; + struct klist mylist; + struct klist_iter i; + + node_count = 0; + klist_init(&mylist, &check_node, &check_delete_node); + + /* Add nodes a,b,c,d to the list*/ + klist_add_tail(&a, &mylist); + klist_add_tail(&b, &mylist); + klist_add_tail(&c, &mylist); + klist_add_tail(&d, &mylist); + + klist_iter_init(&mylist, &i); + + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b); + /* Advance the iterator to point to node c*/ + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c); + + /* Try to delete node c while there is a reference to it*/ + klist_del(&c); + + /* + * Verify that node c is still attached to the list even after being + * deleted. Since the iterator still points to c, the reference count is not + * decreased to 0 + */ + KUNIT_EXPECT_TRUE(test, klist_node_attached(&c)); + + /* Check that node c has not been removed yet*/ + KUNIT_EXPECT_EQ(test, node_count, 4); + KUNIT_EXPECT_PTR_EQ(test, last_node, &d); + + klist_iter_exit(&i); + + /* + * Since the iterator is no longer pointing to node c, node c is removed + * from the list + */ + KUNIT_EXPECT_EQ(test, node_count, 3); + KUNIT_EXPECT_PTR_EQ(test, last_node, &c); + +} + +/* + * Verify that klist_del() deletes a node immediately when there are no + * other references to it. 
+ */ +static void klist_test_del_refcount_zero(struct kunit *test) +{ + struct klist_node a, b, c, d; + struct klist mylist; + struct klist_iter i; + + node_count = 0; + klist_init(&mylist, &check_node, &check_delete_node); + + /* Add nodes a,b,c,d to the list*/ + klist_add_tail(&a, &mylist); + klist_add_tail(&b, &mylist); + klist_add_tail(&c, &mylist); + klist_add_tail(&d, &mylist); + /* Delete node c*/ + klist_del(&c); + + /* Check that node c is deleted from the list*/ + KUNIT_EXPECT_EQ(test, node_count, 3); + KUNIT_EXPECT_PTR_EQ(test, last_node, &c); + + /* Should be [list] -> a -> b -> d*/ + klist_iter_init(&mylist, &i); + + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d); + KUNIT_EXPECT_NULL(test, klist_next(&i)); + + klist_iter_exit(&i); + +} + +static void klist_test_remove(struct kunit *test) +{ + /* This test doesn't check correctness under concurrent access */ + struct klist_node a, b, c, d; + struct klist mylist; + struct klist_iter i; + + node_count = 0; + klist_init(&mylist, &check_node, &check_delete_node); + + /* Add nodes a,b,c,d to the list*/ + klist_add_tail(&a, &mylist); + klist_add_tail(&b, &mylist); + klist_add_tail(&c, &mylist); + klist_add_tail(&d, &mylist); + /* Delete node c*/ + klist_remove(&c); + + /* Check the nodes in the list*/ + KUNIT_EXPECT_EQ(test, node_count, 3); + KUNIT_EXPECT_PTR_EQ(test, last_node, &c); + + /* should be [list] -> a -> b -> d*/ + klist_iter_init(&mylist, &i); + + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b); + KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d); + KUNIT_EXPECT_NULL(test, klist_next(&i)); + + klist_iter_exit(&i); + +} + +static void klist_test_node_attached(struct kunit *test) +{ + struct klist_node a = {}; + struct klist mylist; + + klist_init(&mylist, NULL, NULL); + + KUNIT_EXPECT_FALSE(test, klist_node_attached(&a)); + klist_add_head(&a, &mylist); + KUNIT_EXPECT_TRUE(test, klist_node_attached(&a)); + klist_del(&a); + KUNIT_EXPECT_FALSE(test, klist_node_attached(&a)); + +} + +static struct kunit_case klist_test_cases[] = { + KUNIT_CASE(klist_test_add_tail), + KUNIT_CASE(klist_test_add_head), + KUNIT_CASE(klist_test_add_behind), + KUNIT_CASE(klist_test_add_before), + KUNIT_CASE(klist_test_del_refcount_greater_than_zero), + KUNIT_CASE(klist_test_del_refcount_zero), + KUNIT_CASE(klist_test_remove), + KUNIT_CASE(klist_test_node_attached), + {}, +}; + +static struct kunit_suite klist_test_module = { + .name = "klist", + .test_cases = klist_test_cases, +}; + +kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module); MODULE_LICENSE("GPL v2"); diff --git a/lib/maple_tree.c b/lib/maple_tree.c index db60edb55f2f..1281a40d5735 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1303,26 +1303,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) node = mas->alloc; node->request_count = 0; while (requested) { - max_req = MAPLE_ALLOC_SLOTS; - if (node->node_count) { - unsigned int offset = node->node_count; - - slots = (void **)&node->slot[offset]; - max_req -= offset; - } else { - slots = (void **)&node->slot; - } - + max_req = MAPLE_ALLOC_SLOTS - node->node_count; + slots = (void **)&node->slot[node->node_count]; max_req = min(requested, max_req); count = mt_alloc_bulk(gfp, max_req, slots); if (!count) goto nomem_bulk; + if (node->node_count == 0) { + node->slot[0]->node_count = 0; + node->slot[0]->request_count = 0; + } + node->node_count += count; 
allocated += count; node = node->slot[0]; - node->node_count = 0; - node->request_count = 0; requested -= count; } mas->alloc->total = allocated; @@ -4970,7 +4965,8 @@ not_found: * Return: True if found in a leaf, false otherwise. * */ -static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) +static bool mas_rev_awalk(struct ma_state *mas, unsigned long size, + unsigned long *gap_min, unsigned long *gap_max) { enum maple_type type = mte_node_type(mas->node); struct maple_node *node = mas_mn(mas); @@ -5035,8 +5031,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) if (unlikely(ma_is_leaf(type))) { mas->offset = offset; - mas->min = min; - mas->max = min + gap - 1; + *gap_min = min; + *gap_max = min + gap - 1; return true; } @@ -5060,10 +5056,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) { enum maple_type type = mte_node_type(mas->node); unsigned long pivot, min, gap = 0; - unsigned char offset; - unsigned long *gaps; - unsigned long *pivots = ma_pivots(mas_mn(mas), type); - void __rcu **slots = ma_slots(mas_mn(mas), type); + unsigned char offset, data_end; + unsigned long *gaps, *pivots; + void __rcu **slots; + struct maple_node *node; bool found = false; if (ma_is_dense(type)) { @@ -5071,13 +5067,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) return true; } - gaps = ma_gaps(mte_to_node(mas->node), type); + node = mas_mn(mas); + pivots = ma_pivots(node, type); + slots = ma_slots(node, type); + gaps = ma_gaps(node, type); offset = mas->offset; min = mas_safe_min(mas, pivots, offset); - for (; offset < mt_slots[type]; offset++) { - pivot = mas_safe_pivot(mas, pivots, offset, type); - if (offset && !pivot) - break; + data_end = ma_data_end(node, type, pivots, mas->max); + for (; offset <= data_end; offset++) { + pivot = mas_logical_pivot(mas, pivots, offset, type); /* Not within lower bounds */ if (mas->index > pivot) @@ -5312,6 +5310,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long *pivots; enum maple_type mt; + if (min >= max) + return -EINVAL; + if (mas_is_start(mas)) mas_start(mas); else if (mas->offset >= 2) @@ -5366,6 +5367,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, { struct maple_enode *last = mas->node; + if (min >= max) + return -EINVAL; + if (mas_is_start(mas)) { mas_start(mas); mas->offset = mas_data_end(mas); @@ -5385,7 +5389,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, mas->index = min; mas->last = max; - while (!mas_rev_awalk(mas, size)) { + while (!mas_rev_awalk(mas, size, &min, &max)) { if (last == mas->node) { if (!mas_rewind_node(mas)) return -EBUSY; @@ -5400,17 +5404,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) return -EBUSY; - /* - * mas_rev_awalk() has set mas->min and mas->max to the gap values. If - * the maximum is outside the window we are searching, then use the last - * location in the search. - * mas->max and mas->min is the range of the gap. - * mas->index and mas->last are currently set to the search range. - */ - /* Trim the upper limit to the max. 
*/ - if (mas->max <= mas->last) - mas->last = mas->max; + if (max <= mas->last) + mas->last = max; mas->index = mas->last - size + 1; return 0; diff --git a/lib/packing.c b/lib/packing.c index a96169237ae6..3f656167c17e 100644 --- a/lib/packing.c +++ b/lib/packing.c @@ -198,5 +198,4 @@ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, } EXPORT_SYMBOL(packing); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Generic bitfield packing and unpacking"); diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c index 6e77eb6d8e72..54e1809a38fd 100644 --- a/lib/pldmfw/pldmfw.c +++ b/lib/pldmfw/pldmfw.c @@ -875,5 +875,4 @@ out_release_data: EXPORT_SYMBOL(pldmfw_flash_image); MODULE_AUTHOR("Jacob Keller <jacob.e.keller@intel.com>"); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PLDM firmware flash update library"); diff --git a/lib/rcuref.c b/lib/rcuref.c new file mode 100644 index 000000000000..5ec00a4a64d1 --- /dev/null +++ b/lib/rcuref.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * rcuref - A scalable reference count implementation for RCU managed objects + * + * rcuref is provided to replace open coded reference count implementations + * based on atomic_t. It protects explicitely RCU managed objects which can + * be visible even after the last reference has been dropped and the object + * is heading towards destruction. + * + * A common usage pattern is: + * + * get() + * rcu_read_lock(); + * p = get_ptr(); + * if (p && !atomic_inc_not_zero(&p->refcnt)) + * p = NULL; + * rcu_read_unlock(); + * return p; + * + * put() + * if (!atomic_dec_return(&->refcnt)) { + * remove_ptr(p); + * kfree_rcu((p, rcu); + * } + * + * atomic_inc_not_zero() is implemented with a try_cmpxchg() loop which has + * O(N^2) behaviour under contention with N concurrent operations. + * + * rcuref uses atomic_add_negative_relaxed() for the fast path, which scales + * better under contention. + * + * Why not refcount? + * ================= + * + * In principle it should be possible to make refcount use the rcuref + * scheme, but the destruction race described below cannot be prevented + * unless the protected object is RCU managed. + * + * Theory of operation + * =================== + * + * rcuref uses an unsigned integer reference counter. As long as the + * counter value is greater than or equal to RCUREF_ONEREF and not larger + * than RCUREF_MAXREF the reference is alive: + * + * ONEREF MAXREF SATURATED RELEASED DEAD NOREF + * 0 0x7FFFFFFF 0x8000000 0xA0000000 0xBFFFFFFF 0xC0000000 0xE0000000 0xFFFFFFFF + * <---valid --------> <-------saturation zone-------> <-----dead zone-----> + * + * The get() and put() operations do unconditional increments and + * decrements. The result is checked after the operation. This optimizes + * for the fast path. + * + * If the reference count is saturated or dead, then the increments and + * decrements are not harmful as the reference count still stays in the + * respective zones and is always set back to STATURATED resp. DEAD. The + * zones have room for 2^28 racing operations in each direction, which + * makes it practically impossible to escape the zones. + * + * Once the last reference is dropped the reference count becomes + * RCUREF_NOREF which forces rcuref_put() into the slowpath operation. The + * slowpath then tries to set the reference count from RCUREF_NOREF to + * RCUREF_DEAD via a cmpxchg(). 
This opens a small window where a + * concurrent rcuref_get() can acquire the reference count and bring it + * back to RCUREF_ONEREF or even drop the reference again and mark it DEAD. + * + * If the cmpxchg() succeeds then a concurrent rcuref_get() will result in + * DEAD + 1, which is inside the dead zone. If that happens the reference + * count is put back to DEAD. + * + * The actual race is possible due to the unconditional increments and + * decrements in rcuref_get() and rcuref_put(): + * + * T1 T2 + * get() put() + * if (atomic_add_negative(-1, &ref->refcnt)) + * succeeds-> atomic_cmpxchg(&ref->refcnt, NOREF, DEAD); + * + * atomic_add_negative(1, &ref->refcnt); <- Elevates refcount to DEAD + 1 + * + * As the result of T1's add is negative, the get() goes into the slow path + * and observes refcnt being in the dead zone which makes the operation fail. + * + * Possible critical states: + * + * Context Counter References Operation + * T1 0 1 init() + * T2 1 2 get() + * T1 0 1 put() + * T2 -1 0 put() tries to mark dead + * T1 0 1 get() + * T2 0 1 put() mark dead fails + * T1 -1 0 put() tries to mark dead + * T1 DEAD 0 put() mark dead succeeds + * T2 DEAD+1 0 get() fails and puts it back to DEAD + * + * Of course there are more complex scenarios, but the above illustrates + * the working principle. The rest is left to the imagination of the + * reader. + * + * Deconstruction race + * =================== + * + * The release operation must be protected by prohibiting a grace period in + * order to prevent a possible use after free: + * + * T1 T2 + * put() get() + * // ref->refcnt = ONEREF + * if (!atomic_add_negative(-1, &ref->refcnt)) + * return false; <- Not taken + * + * // ref->refcnt == NOREF + * --> preemption + * // Elevates ref->refcnt to ONEREF + * if (!atomic_add_negative(1, &ref->refcnt)) + * return true; <- taken + * + * if (put(&p->ref)) { <-- Succeeds + * remove_pointer(p); + * kfree_rcu(p, rcu); + * } + * + * RCU grace period ends, object is freed + * + * atomic_cmpxchg(&ref->refcnt, NOREF, DEAD); <- UAF + * + * This is prevented by disabling preemption around the put() operation as + * that is cheaper in most kernel configurations than a rcu_read_lock() / + * rcu_read_unlock() pair and in many cases even a NOOP. In any case it + * prevents the grace period from completing, which keeps the object alive until all put() + * operations complete. + * + * Saturation protection + * ===================== + * + * The reference count has a saturation limit RCUREF_MAXREF (INT_MAX). + * Once this is exceeded the reference count becomes stale by being set + * to RCUREF_SATURATED, which will cause a memory leak, but it prevents + * wrap arounds which obviously cause worse problems than a memory + * leak. When saturation is reached a warning is emitted. + * + * Race conditions + * =============== + * + * All reference count increment/decrement operations are unconditional and + * only verified after the fact. This optimizes for the good case and takes + * the occasional race vs. a dead or already saturated refcount into + * account. The saturation and dead zones are large enough to accommodate + * that. + * + * Memory ordering + * =============== + * + * Memory ordering rules are slightly relaxed wrt regular atomic_t functions + * and provide only what is strictly required for refcounts. + * + * The increments are fully relaxed; these will not provide ordering. The + * rationale is that whatever is used to obtain the object to increase the + * reference count on will provide the ordering.
For locked data + * structures, it's the lock acquire; for RCU/lockless data structures it's + * the dependent load. + * + * rcuref_get() provides a control dependency that orders future stores, which + * ensures that the object is not modified when acquiring a reference + * fails. + * + * rcuref_put() provides release ordering, i.e. all prior loads and stores + * will be issued before it. It also provides a control dependency ordering + * against the subsequent destruction of the object. + * + * If rcuref_put() successfully dropped the last reference and marked the + * object DEAD it also provides acquire ordering. + */ + +#include <linux/export.h> +#include <linux/rcuref.h> + +/** + * rcuref_get_slowpath - Slowpath of rcuref_get() + * @ref: Pointer to the reference count + * + * Invoked when the reference count is outside of the valid zone. + * + * Return: + * False if the reference count was already marked dead + * + * True if the reference count is saturated, which prevents the + * object from ever being deconstructed. + */ +bool rcuref_get_slowpath(rcuref_t *ref) +{ + unsigned int cnt = atomic_read(&ref->refcnt); + + /* + * If the reference count was already marked dead, undo the + * increment so it stays in the middle of the dead zone and return + * false. + */ + if (cnt >= RCUREF_RELEASED) { + atomic_set(&ref->refcnt, RCUREF_DEAD); + return false; + } + + /* + * If it was saturated, warn and mark it so. In case the increment + * was already on a saturated value restore the saturation + * marker. This keeps it in the middle of the saturation zone and + * prevents the reference count from overflowing. This leaks the + * object memory, but prevents the obvious reference count overflow + * damage. + */ + if (WARN_ONCE(cnt > RCUREF_MAXREF, "rcuref saturated - leaking memory")) + atomic_set(&ref->refcnt, RCUREF_SATURATED); + return true; +} +EXPORT_SYMBOL_GPL(rcuref_get_slowpath); + +/** + * rcuref_put_slowpath - Slowpath of __rcuref_put() + * @ref: Pointer to the reference count + * + * Invoked when the reference count is outside of the valid zone. + * + * Return: + * True if this was the last reference with no future references + * possible. This signals the caller that it can safely schedule the + * object, which is protected by the reference counter, for + * deconstruction. + * + * False if there are still active references or the put() raced + * with a concurrent get()/put() pair. Caller is not allowed to + * deconstruct the protected object. + */ +bool rcuref_put_slowpath(rcuref_t *ref) +{ + unsigned int cnt = atomic_read(&ref->refcnt); + + /* Did this drop the last reference? */ + if (likely(cnt == RCUREF_NOREF)) { + /* + * Carefully try to set the reference count to RCUREF_DEAD. + * + * This can fail if a concurrent get() operation has + * elevated it again or the corresponding put() even marked + * it dead already. Both are valid situations and do not + * require a retry. If this fails the caller is not + * allowed to deconstruct the object. + */ + if (atomic_cmpxchg_release(&ref->refcnt, RCUREF_NOREF, RCUREF_DEAD) != RCUREF_NOREF) + return false; + + /* + * The caller can safely schedule the object for + * deconstruction. Provide acquire ordering. + */ + smp_acquire__after_ctrl_dep(); + return true; + } + + /* + * If the reference count was already in the dead zone, then this + * put() operation is imbalanced. Warn, put the reference count back to + * DEAD and tell the caller to not deconstruct the object.
+ */ + if (WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")) { + atomic_set(&ref->refcnt, RCUREF_DEAD); + return false; + } + + /* + * This is a put() operation on a saturated refcount. Restore the + * mean saturation value and tell the caller to not deconstruct the + * object. + */ + if (cnt > RCUREF_MAXREF) + atomic_set(&ref->refcnt, RCUREF_SATURATED); + return false; +} +EXPORT_SYMBOL_GPL(rcuref_put_slowpath); diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c index 1fb56cf5e5ce..fd6153800e56 100644 --- a/lib/test_fprobe.c +++ b/lib/test_fprobe.c @@ -168,4 +168,3 @@ static struct kunit_suite fprobe_test_suite = { kunit_test_suites(&fprobe_test_suite); -MODULE_LICENSE("GPL"); diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index de4ee0d50906..cd2bdba6d3ed 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -334,7 +334,7 @@ kvfree_rcu_1_arg_vmalloc_test(void) return -1; p->array[0] = 'a'; - kvfree_rcu(p); + kvfree_rcu_mightsleep(p); } return 0; diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile index e814061d6aa0..9f031eafc465 100644 --- a/lib/vdso/Makefile +++ b/lib/vdso/Makefile @@ -5,18 +5,13 @@ GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH)) c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c) -# This cmd checks that the vdso library does not contain absolute relocation +# This cmd checks that the vdso library does not contain dynamic relocations. # It has to be called after the linking of the vdso library and requires it # as a parameter. # -# $(ARCH_REL_TYPE_ABS) is defined in the arch specific makefile and corresponds -# to the absolute relocation types printed by "objdump -R" and accepted by the -# dynamic linker. -ifndef ARCH_REL_TYPE_ABS -$(error ARCH_REL_TYPE_ABS is not set) -endif - +# As a workaround for some GNU ld ports which produce unneeded R_*_NONE +# dynamic relocations, ignore R_*_NONE. quiet_cmd_vdso_check = VDSOCHK $@ - cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \ + cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \ then (echo >&2 "$@: dynamic relocations are not supported"; \ rm -f $@; /bin/false); fi diff --git a/lib/vsprintf.c b/lib/vsprintf.c index be71a03c936a..426418253fd4 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -3621,7 +3621,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) if (!digit || (base == 16 && !isxdigit(digit)) || (base == 10 && !isdigit(digit)) - || (base == 8 && (!isdigit(digit) || digit > '7')) + || (base == 8 && !isodigit(digit)) || (base == 0 && !isdigit(digit))) break; |
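Editor's note on the maple tree hunks earlier in this diff: mas_rev_awalk() now reports the bounds of the discovered gap through the new gap_min/gap_max parameters instead of clobbering mas->min/mas->max, and both mas_empty_area() and mas_empty_area_rev() reject an empty search window (min >= max) with -EINVAL. The sketch below only illustrates how a caller might drive the reverse (top-down) search under the assumption of the in-tree mas_empty_area_rev() signature; the tree, the spinlock-based locking and the 0x1000..0x100000 window are made-up example values, not part of this patch.

#include <linux/maple_tree.h>

/*
 * Illustrative only: find the highest free span of @size consecutive
 * indices between 0x1000 and 0x100000 in @mt. Assumes the tree uses its
 * internal spinlock (i.e. not MT_FLAGS_LOCK_EXTERN).
 */
static int find_free_range_topdown(struct maple_tree *mt, unsigned long size,
                                   unsigned long *start)
{
        MA_STATE(mas, mt, 0, 0);
        int ret;

        mas_lock(&mas);
        /* Returns -EINVAL for a bogus window, -EBUSY if no gap fits. */
        ret = mas_empty_area_rev(&mas, 0x1000, 0x100000, size);
        mas_unlock(&mas);
        if (ret)
                return ret;

        /*
         * On success mas.index .. mas.last is a size-wide range placed at
         * the top of the highest fitting gap.
         */
        *start = mas.index;
        return 0;
}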
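Editor's note on the new lib/rcuref.c: the header side (<linux/rcuref.h>) is not shown in this diff, so the following is only a sketch of how the open coded atomic_t pattern quoted in the rcuref.c comment might look after a conversion, assuming the rcuref_init()/rcuref_get()/rcuref_put() helpers from that header. "struct gadget", "gadget_ptr" and the alloc/get/put helpers are hypothetical names invented for the example.

#include <linux/rcuref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct gadget {
        rcuref_t        ref;
        struct rcu_head rcu;
        /* ... payload ... */
};

/* Hypothetical RCU protected lookup pointer. */
static struct gadget __rcu *gadget_ptr;

static struct gadget *gadget_alloc(void)
{
        struct gadget *g = kzalloc(sizeof(*g), GFP_KERNEL);

        if (g)
                rcuref_init(&g->ref, 1);        /* start with one reference */
        return g;
}

static struct gadget *gadget_get(void)
{
        struct gadget *g;

        rcu_read_lock();
        g = rcu_dereference(gadget_ptr);
        /* The get can still fail if the last reference is already gone. */
        if (g && !rcuref_get(&g->ref))
                g = NULL;
        rcu_read_unlock();
        return g;
}

static void gadget_put(struct gadget *g)
{
        /* rcuref_put() returns true only for the final reference. */
        if (rcuref_put(&g->ref)) {
                RCU_INIT_POINTER(gadget_ptr, NULL);     /* unpublish */
                kfree_rcu(g, rcu);
        }
}

Per the deconstruction race discussed in the file comment, the put side must keep a grace period from completing across the final cmpxchg(); in this sketch that is assumed to be handled inside rcuref_put() itself (expected to disable preemption around the slowpath), so the caller adds no RCU read-side section of its own.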
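Editor's note on the lib/test_vmalloc.c hunk: the single-argument kvfree_rcu() call becomes kvfree_rcu_mightsleep(), which spells out the sleeping behaviour in the name. A short sketch of the two flavours follows; "struct blob" and the helpers are made-up example names.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
        struct rcu_head rcu;
        char data[64];
};

static void release_blob(struct blob *b)
{
        /* Two-argument form: uses the embedded rcu_head and does not sleep. */
        kvfree_rcu(b, rcu);
}

static void release_plain_buffer(void *buf)
{
        /*
         * Headless single-argument form: no rcu_head required, but it may
         * block (it can fall back to synchronize_rcu() under memory
         * pressure), so it is only valid in sleepable context, hence the
         * _mightsleep suffix.
         */
        kvfree_rcu_mightsleep(buf);
}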