Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile                |  4
-rw-r--r--  lib/alloc_tag.c             |  1
-rwxr-xr-x  lib/build_OID_registry      |  4
-rw-r--r--  lib/closure.c               | 55
-rw-r--r--  lib/debugobjects.c          | 21
-rw-r--r--  lib/fortify_kunit.c         |  3
-rw-r--r--  lib/string_helpers_kunit.c  |  1
-rw-r--r--  lib/string_kunit.c          |  1
-rw-r--r--  lib/vdso/gettimeofday.c     | 20
9 files changed, 80 insertions, 30 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 3b1769045651..30337431d10e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -426,3 +426,7 @@ $(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
ifeq ($(CONFIG_FORTIFY_SOURCE),y)
$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
endif
+
+# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
+# Pass CFLAGS_KASAN to avoid warnings.
+$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y))
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index c347b8b72d78..81e5f9a70f22 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -239,7 +239,6 @@ static struct ctl_table memory_allocation_profiling_sysctls[] = {
#endif
.proc_handler = proc_do_static_key,
},
- { }
};
static void __init sysctl_init(void)
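
The dropped { } entry tracks the kernel-wide removal of sysctl sentinel entries: register_sysctl() sizes tables with ARRAY_SIZE(), so a terminating empty element is no longer needed. A minimal sketch of the resulting pattern (the table and knob names here are illustrative, not from this diff):

#include <linux/sysctl.h>

static int example_knob;

/* No trailing { } sentinel: register_sysctl() derives the table
 * length via ARRAY_SIZE() instead of scanning for an empty entry. */
static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(example_knob),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static void __init example_sysctl_init(void)
{
	register_sysctl("kernel", example_sysctls);
}
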
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
index 56d8bafeb848..8267e8d71338 100755
--- a/lib/build_OID_registry
+++ b/lib/build_OID_registry
@@ -38,7 +38,9 @@ close IN_FILE || die;
#
open C_FILE, ">$ARGV[1]" or die;
print C_FILE "/*\n";
-print C_FILE " * Automatically generated by ", $0 =~ s#^\Q$abs_srctree/\E##r, ". Do not edit\n";
+my $scriptname = $0;
+$scriptname =~ s#^\Q$abs_srctree/\E##;
+print C_FILE " * Automatically generated by ", $scriptname, ". Do not edit\n";
print C_FILE " */\n";
#
diff --git a/lib/closure.c b/lib/closure.c
index 2e1ee9fdec08..116afae2eed9 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,7 +13,7 @@
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
@@ -22,12 +22,17 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
r &= ~CLOSURE_GUARD_MASK;
- if (!r) {
- smp_acquire__after_ctrl_dep();
+ WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+ "closure ref hit 0 with incorrect flags set: %x (%u)",
+ flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ closure_put_after_sub_checks(flags);
- WARN(flags & ~CLOSURE_DESTRUCTOR,
- "closure ref hit 0 with incorrect flags set: %x (%u)",
- flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+ if (!(flags & CLOSURE_REMAINING_MASK)) {
+ smp_acquire__after_ctrl_dep();
cl->closure_get_happened = false;
@@ -145,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(__closure_sync);
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish), returning once the closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ set_closure_fn(cl, closure_sync_fn, NULL);
+
+ unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+ &cl->remaining);
+
+ closure_put_after_sub_checks(flags);
+
+ if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ }
+
+ if (cl->parent)
+ closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
struct closure_syncer s = { .task = current };
@@ -204,6 +244,9 @@ void closure_debug_destroy(struct closure *cl)
{
unsigned long flags;
+ if (cl->magic == CLOSURE_MAGIC_STACK)
+ return;
+
BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
cl->magic = CLOSURE_MAGIC_DEAD;
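
Taken together, the new closure_return_sync() and the CLOSURE_MAGIC_STACK early return let a stack-allocated closure be waited on and then simply go out of scope. A usage sketch, assuming closure_init_stack() marks the closure with CLOSURE_MAGIC_STACK (the include/linux/closure.h side of this change), with kick_off_async_work() standing in for any code that takes its own closure_get()/closure_put() references:

#include <linux/closure.h>

static void example_wait(void)
{
	struct closure cl;

	closure_init_stack(&cl);

	/* Hypothetical async work; takes and drops its own refs. */
	kick_off_async_work(&cl);

	/*
	 * Drop the initial ref and sleep until every outstanding
	 * get() has been put. The refcount is left at 0, so any
	 * subsequent closure_get_not_zero() will fail.
	 */
	closure_return_sync(&cl);
}
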
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fb12a9bacd2f..7cea91e193a8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -78,16 +78,17 @@ static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
-static int debug_objects_maxchain __read_mostly;
-static int __maybe_unused debug_objects_maxchecked __read_mostly;
-static int debug_objects_fixups __read_mostly;
-static int debug_objects_warnings __read_mostly;
-static int debug_objects_enabled __read_mostly
- = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int debug_objects_pool_size __read_mostly
- = ODEBUG_POOL_SIZE;
-static int debug_objects_pool_min_level __read_mostly
- = ODEBUG_POOL_MIN_LEVEL;
+static int __data_racy debug_objects_maxchain __read_mostly;
+static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
+static int __data_racy debug_objects_fixups __read_mostly;
+static int __data_racy debug_objects_warnings __read_mostly;
+static int __data_racy debug_objects_enabled __read_mostly
+ = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+static int __data_racy debug_objects_pool_size __read_mostly
+ = ODEBUG_POOL_SIZE;
+static int __data_racy debug_objects_pool_min_level __read_mostly
+ = ODEBUG_POOL_MIN_LEVEL;
+
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache *obj_cache __ro_after_init;
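
__data_racy, from <linux/compiler_types.h>, documents that lockless accesses to these statistics are intentional, so KCSAN stops flagging them; in KCSAN-instrumented builds the attribute expands to volatile. A minimal sketch of the annotation on a hypothetical stats counter:

#include <linux/compiler_types.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);
static int __data_racy stats_maxchain;	/* also read locklessly */

static void note_chain(int len)
{
	unsigned long flags;

	spin_lock_irqsave(&stats_lock, flags);
	if (len > stats_maxchain)
		stats_maxchain = len;
	spin_unlock_irqrestore(&stats_lock, flags);
}

static int peek_maxchain(void)
{
	/* Racy read by design; __data_racy keeps KCSAN quiet. */
	return stats_maxchain;
}
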
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index e17d520f532c..27ea8bf0252c 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -910,10 +910,9 @@ static void fortify_test_##memfunc(struct kunit *test) \
memfunc(zero.buf, srcB, 0 + unconst); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- /* We currently explicitly ignore zero-sized dests. */ \
memfunc(zero.buf, srcB, 1 + unconst); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
- KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
}
__fortify_test(memcpy)
__fortify_test(memmove)
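
The flipped expectation reflects FORTIFY_SOURCE no longer ignoring zero-sized destination buffers: copying even a single byte into one now trips the write-overflow counter. A sketch of the shape the test exercises, assuming CONFIG_FORTIFY_SOURCE=y (struct and variable names are illustrative):

struct has_zero_buf {
	unsigned long front;
	char buf[0];		/* zero-sized destination */
	unsigned long back;
};

static void demo(struct has_zero_buf *zero, const char *src)
{
	memcpy(zero->buf, src, 0);	/* no bytes written: still OK */
	memcpy(zero->buf, src, 1);	/* now counted as a write overflow */
}
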
diff --git a/lib/string_helpers_kunit.c b/lib/string_helpers_kunit.c
index f88e39fd68d6..c853046183d2 100644
--- a/lib/string_helpers_kunit.c
+++ b/lib/string_helpers_kunit.c
@@ -625,4 +625,5 @@ static struct kunit_suite string_helpers_test_suite = {
kunit_test_suites(&string_helpers_test_suite);
+MODULE_DESCRIPTION("Test cases for string helpers module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
index 2a812decf14b..c919e3293da6 100644
--- a/lib/string_kunit.c
+++ b/lib/string_kunit.c
@@ -633,4 +633,5 @@ static struct kunit_suite string_test_suite = {
kunit_test_suites(&string_test_suite);
+MODULE_DESCRIPTION("Test cases for string functions");
MODULE_LICENSE("GPL v2");
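
Both KUnit suites gain a MODULE_DESCRIPTION(); the usual motivation is modpost's W=1 warning about modules that lack one. The resulting module tail looks like this sketch (suite name illustrative):

#include <kunit/test.h>
#include <linux/module.h>

static struct kunit_suite example_suite = {
	.name = "example",
};
kunit_test_suites(&example_suite);

/* Silences modpost's "missing MODULE_DESCRIPTION()" W=1 warning. */
MODULE_DESCRIPTION("Example KUnit suite");
MODULE_LICENSE("GPL");
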
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 899850bd6f0b..c01eaafd8041 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -140,14 +140,14 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
- * enabled tasks have a special VVAR page installed which
- * has vd->seq set to 1 and vd->clock_mode set to
- * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
- * this does not affect performance because if vd->seq is
- * odd, i.e. a concurrent update is in progress the extra
- * check for vd->clock_mode is just a few extra
- * instructions while spin waiting for vd->seq to become
+ * Open coded function vdso_read_begin() to handle
+ * VDSO_CLOCKMODE_TIMENS. Time namespace enabled tasks have a
+ * special VVAR page installed which has vd->seq set to 1 and
+ * vd->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time
+ * namespace affected tasks this does not affect performance
+ * because if vd->seq is odd, i.e. a concurrent update is in
+ * progress, the extra check for vd->clock_mode is just a few
+ * extra instructions while spin waiting for vd->seq to become
* even again.
*/
while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
@@ -223,8 +223,8 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VDSO_CLOCK_TIMENS. See comment in
- * do_hres().
+ * Open coded function vdso_read_begin() to handle
+ * VDSO_CLOCK_TIMENS. See comment in do_hres().
*/
while ((seq = READ_ONCE(vd->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
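
For reference, the vdso_read_begin() helper the comments now name implements the usual seqcount read-begin: spin while vd->seq is odd, then order the subsequent data reads. A sketch along the lines of include/vdso/helpers.h; the open-coded loops above only add the vd->clock_mode == VDSO_CLOCKMODE_TIMENS check inside the wait loop:

static __always_inline u32 vdso_read_begin_sketch(const struct vdso_data *vd)
{
	u32 seq;

	/* An odd vd->seq means an update is in flight: spin-wait. */
	while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
		cpu_relax();

	smp_rmb();	/* order the seq load before the data loads */
	return seq;
}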