Diffstat (limited to 'lib')
lib/Kconfig                   |   4
lib/Kconfig.debug             |  19
lib/Kconfig.kasan             |   4
lib/Kconfig.kcsan             |  26
lib/Kconfig.kgdb              |   5
lib/Makefile                  |  25
lib/cpumask.c                 |  16
lib/crc-t10dif.c              |  75
lib/crypto/chacha20poly1305.c |   2
lib/crypto/sha256.c           |  10
lib/debugobjects.c            |  13
lib/decompress.c              |   5
lib/decompress_unzstd.c       | 345
lib/math/div64.c              |  41
lib/mpi/Makefile              |   1
lib/mpi/mpi-sub-ui.c          |  78
lib/packing.c                 |   1
lib/radix-tree.c              |   2
lib/random32.c                |   2
lib/rhashtable.c              |  35
lib/sbitmap.c                 |   3
lib/seq_buf.c                 |   1
lib/test-string_helpers.c     |  67
lib/test_fpu.c                |  89
lib/test_hmm.c                |   3
lib/test_lockup.c             |   8
lib/test_objagg.c             |   4
lib/test_vmalloc.c            | 103
lib/vsprintf.c                |  17
lib/zstd/fse_decompress.c     |   9
lib/zstd/zstd_internal.h      |  14
31 files changed, 916 insertions, 111 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index df3f3da95990..a5d6f23c4cab 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -342,6 +342,10 @@ config DECOMPRESS_LZ4 select LZ4_DECOMPRESS tristate +config DECOMPRESS_ZSTD + select ZSTD_DECOMPRESS + tristate + # # Generic allocator support is selected if needed # diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index d74ac0fd6b2d..3e64a8a809f9 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -229,7 +229,6 @@ config DEBUG_INFO_COMPRESSED bool "Compressed debugging information" depends on DEBUG_INFO depends on $(cc-option,-gz=zlib) - depends on $(as-option,-Wa$(comma)--compress-debug-sections=zlib) depends on $(ld-option,--compress-debug-sections=zlib) help Compress the debug information using zlib. Requires GCC 5.0+ or Clang @@ -1118,6 +1117,7 @@ config PROVE_LOCKING select DEBUG_RWSEMS select DEBUG_WW_MUTEX_SLOWPATH select DEBUG_LOCK_ALLOC + select PREEMPT_COUNT if !ARCH_NO_PREEMPT select TRACE_IRQFLAGS default n help @@ -1326,11 +1326,17 @@ config WW_MUTEX_SELFTEST endmenu # lock debugging config TRACE_IRQFLAGS + depends on TRACE_IRQFLAGS_SUPPORT bool help Enables hooks to interrupt enabling and disabling for either tracing or lock debugging. +config TRACE_IRQFLAGS_NMI + def_bool y + depends on TRACE_IRQFLAGS + depends on TRACE_IRQFLAGS_NMI_SUPPORT + config STACKTRACE bool "Stack backtrace support" depends on STACKTRACE_SUPPORT @@ -2308,6 +2314,17 @@ config TEST_HMM If unsure, say N. +config TEST_FPU + tristate "Test floating point operations in kernel space" + depends on X86 && !KCOV_INSTRUMENT_ALL + help + Enable this option to add /sys/kernel/debug/selftest_helpers/test_fpu + which will trigger a sequence of floating point operations. This is used + for self-testing floating point control register setting in + kernel_fpu_begin(). + + If unsure, say N. + endif # RUNTIME_TESTING_MENU config MEMTEST diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 81f5464ea9e1..34b84bcbd3d9 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -15,11 +15,15 @@ config CC_HAS_KASAN_GENERIC config CC_HAS_KASAN_SW_TAGS def_bool $(cc-option, -fsanitize=kernel-hwaddress) +config CC_HAS_WORKING_NOSANITIZE_ADDRESS + def_bool !CC_IS_GCC || GCC_VERSION >= 80300 + config KASAN bool "KASAN: runtime memory debugger" depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS) depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) + depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS help Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, designed to find out-of-bounds accesses and use-after-free bugs. diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index 5ee88e5119c2..3d282d51849b 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -4,7 +4,8 @@ config HAVE_ARCH_KCSAN bool config HAVE_KCSAN_COMPILER - def_bool CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1) + def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)) || \ + (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-distinguish-volatile=1)) help For the list of compilers that support KCSAN, please see <file:Documentation/dev-tools/kcsan.rst>. @@ -59,7 +60,28 @@ config KCSAN_SELFTEST bool "Perform short selftests on boot" default y help - Run KCSAN selftests on boot. On test failure, causes the kernel to panic. + Run KCSAN selftests on boot. On test failure, causes the kernel to + panic. Recommended to be enabled, ensuring critical functionality + works as intended. 
+ +config KCSAN_TEST + tristate "KCSAN test for integrated runtime behaviour" + depends on TRACEPOINTS && KUNIT + select TORTURE_TEST + help + KCSAN test focusing on behaviour of the integrated runtime. Tests + various race scenarios, and verifies the reports generated to + console. Makes use of KUnit for test organization, and the Torture + framework for test thread control. + + Each test case may run at least up to KCSAN_REPORT_ONCE_IN_MS + milliseconds. Test run duration may be optimized by building the + kernel and KCSAN test with KCSAN_REPORT_ONCE_IN_MS set to a lower + than default value. + + Say Y here if you want the test to be built into the kernel and run + during boot; say M if you want the test to build as a module; say N + if you are unsure. config KCSAN_EARLY_ENABLE bool "Early enable during boot" diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index ffa7a76de086..256f2486f9bd 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -3,6 +3,11 @@ config HAVE_ARCH_KGDB bool +# set if architecture has the its kgdb_arch_handle_qxfer_pkt +# function to enable gdb stub to address XML packet sent from GDB. +config HAVE_ARCH_KGDB_QXFER_PKT + bool + menuconfig KGDB bool "KGDB: kernel debugger" depends on HAVE_ARCH_KGDB diff --git a/lib/Makefile b/lib/Makefile index b1c42c10073b..7d24dd73e34c 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -99,6 +99,30 @@ obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o obj-$(CONFIG_TEST_HMM) += test_hmm.o +# +# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns +# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS +# get appended last to CFLAGS and thus override those previous compiler options. +# +FPU_CFLAGS := -mhard-float -msse -msse2 +ifdef CONFIG_CC_IS_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383 +# +# The "-msse" in the first argument is there so that the +# -mpreferred-stack-boundary=3 build error: +# +# -mpreferred-stack-boundary=3 is not between 4 and 12 +# +# can be triggered. Otherwise gcc doesn't complain. +FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4) +endif + +obj-$(CONFIG_TEST_FPU) += test_fpu.o +CFLAGS_test_fpu.o += $(FPU_CFLAGS) + obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ obj-$(CONFIG_KUNIT) += kunit/ @@ -170,6 +194,7 @@ lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o +lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o diff --git a/lib/cpumask.c b/lib/cpumask.c index fb22fb266f93..85da6ab4fbb5 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -6,6 +6,7 @@ #include <linux/export.h> #include <linux/memblock.h> #include <linux/numa.h> +#include <linux/sched/isolation.h> /** * cpumask_next - get the next cpu in a cpumask @@ -205,22 +206,27 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask) */ unsigned int cpumask_local_spread(unsigned int i, int node) { - int cpu; + int cpu, hk_flags; + const struct cpumask *mask; + hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ; + mask = housekeeping_cpumask(hk_flags); /* Wrap: we always want a cpu. 
*/ - i %= num_online_cpus(); + i %= cpumask_weight(mask); if (node == NUMA_NO_NODE) { - for_each_cpu(cpu, cpu_online_mask) + for_each_cpu(cpu, mask) { if (i-- == 0) return cpu; + } } else { /* NUMA first. */ - for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + for_each_cpu_and(cpu, cpumask_of_node(node), mask) { if (i-- == 0) return cpu; + } - for_each_cpu(cpu, cpu_online_mask) { + for_each_cpu(cpu, mask) { /* Skip NUMA nodes, done above. */ if (cpumask_test_cpu(cpu, cpumask_of_node(node))) continue; diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index 8cc01a603416..1ed2ed487097 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -17,64 +17,69 @@ #include <linux/notifier.h> static struct crypto_shash __rcu *crct10dif_tfm; -static struct static_key crct10dif_fallback __read_mostly; +static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback); static DEFINE_MUTEX(crc_t10dif_mutex); +static struct work_struct crct10dif_rehash_work; -static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data) +static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data) { struct crypto_alg *alg = data; - struct crypto_shash *new, *old; if (val != CRYPTO_MSG_ALG_LOADED || - static_key_false(&crct10dif_fallback) || - strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING))) - return 0; + strcmp(alg->cra_name, CRC_T10DIF_STRING)) + return NOTIFY_DONE; + + schedule_work(&crct10dif_rehash_work); + return NOTIFY_OK; +} + +static void crc_t10dif_rehash(struct work_struct *work) +{ + struct crypto_shash *new, *old; mutex_lock(&crc_t10dif_mutex); old = rcu_dereference_protected(crct10dif_tfm, lockdep_is_held(&crc_t10dif_mutex)); - if (!old) { - mutex_unlock(&crc_t10dif_mutex); - return 0; - } - new = crypto_alloc_shash("crct10dif", 0, 0); + new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0); if (IS_ERR(new)) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } rcu_assign_pointer(crct10dif_tfm, new); mutex_unlock(&crc_t10dif_mutex); - synchronize_rcu(); - crypto_free_shash(old); - return 0; + if (old) { + synchronize_rcu(); + crypto_free_shash(old); + } else { + static_branch_disable(&crct10dif_fallback); + } } static struct notifier_block crc_t10dif_nb = { - .notifier_call = crc_t10dif_rehash, + .notifier_call = crc_t10dif_notify, }; __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) { struct { struct shash_desc shash; - char ctx[2]; + __u16 crc; } desc; int err; - if (static_key_false(&crct10dif_fallback)) + if (static_branch_unlikely(&crct10dif_fallback)) return crc_t10dif_generic(crc, buffer, len); rcu_read_lock(); desc.shash.tfm = rcu_dereference(crct10dif_tfm); - *(__u16 *)desc.ctx = crc; - + desc.crc = crc; err = crypto_shash_update(&desc.shash, buffer, len); rcu_read_unlock(); BUG_ON(err); - return *(__u16 *)desc.ctx; + return desc.crc; } EXPORT_SYMBOL(crc_t10dif_update); @@ -86,19 +91,17 @@ EXPORT_SYMBOL(crc_t10dif); static int __init crc_t10dif_mod_init(void) { + INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash); crypto_register_notifier(&crc_t10dif_nb); - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); - if (IS_ERR(crct10dif_tfm)) { - static_key_slow_inc(&crct10dif_fallback); - crct10dif_tfm = NULL; - } + crc_t10dif_rehash(&crct10dif_rehash_work); return 0; } static void __exit crc_t10dif_mod_fini(void) { crypto_unregister_notifier(&crc_t10dif_nb); - crypto_free_shash(crct10dif_tfm); + cancel_work_sync(&crct10dif_rehash_work); + crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1)); } 
module_init(crc_t10dif_mod_init); @@ -106,15 +109,23 @@ module_exit(crc_t10dif_mod_fini); static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp) { - if (static_key_false(&crct10dif_fallback)) + struct crypto_shash *tfm; + int len; + + if (static_branch_unlikely(&crct10dif_fallback)) return sprintf(buffer, "fallback\n"); - return sprintf(buffer, "%s\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm))); + rcu_read_lock(); + tfm = rcu_dereference(crct10dif_tfm); + len = snprintf(buffer, PAGE_SIZE, "%s\n", + crypto_shash_driver_name(tfm)); + rcu_read_unlock(); + + return len; } -module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644); +module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444); -MODULE_DESCRIPTION("T10 DIF CRC calculation"); +MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)"); MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crct10dif"); diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index ad0699ce702f..431e04280332 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -21,8 +21,6 @@ #define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) -bool __init chacha20poly1305_selftest(void); - static void chacha_load_key(u32 *k, const u8 *in) { k[0] = get_unaligned_le32(in); diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 2e621697c5c3..2321f6cb322f 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -280,4 +280,14 @@ void sha224_final(struct sha256_state *sctx, u8 *out) } EXPORT_SYMBOL(sha224_final); +void sha256(const u8 *data, unsigned int len, u8 *out) +{ + struct sha256_state sctx; + + sha256_init(&sctx); + sha256_update(&sctx, data, len); + sha256_final(&sctx, out); +} +EXPORT_SYMBOL(sha256); + MODULE_LICENSE("GPL"); diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 48054dbf1b51..fe4557955d97 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -1022,18 +1022,7 @@ static int debug_stats_show(struct seq_file *m, void *v) seq_printf(m, "objs_freed :%d\n", debug_objects_freed); return 0; } - -static int debug_stats_open(struct inode *inode, struct file *filp) -{ - return single_open(filp, debug_stats_show, NULL); -} - -static const struct file_operations debug_stats_fops = { - .open = debug_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(debug_stats); static int __init debug_objects_init_debugfs(void) { diff --git a/lib/decompress.c b/lib/decompress.c index 857ab1af1ef3..ab3fc90ffc64 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -13,6 +13,7 @@ #include <linux/decompress/inflate.h> #include <linux/decompress/unlzo.h> #include <linux/decompress/unlz4.h> +#include <linux/decompress/unzstd.h> #include <linux/types.h> #include <linux/string.h> @@ -37,6 +38,9 @@ #ifndef CONFIG_DECOMPRESS_LZ4 # define unlz4 NULL #endif +#ifndef CONFIG_DECOMPRESS_ZSTD +# define unzstd NULL +#endif struct compress_format { unsigned char magic[2]; @@ -52,6 +56,7 @@ static const struct compress_format compressed_formats[] __initconst = { { {0xfd, 0x37}, "xz", unxz }, { {0x89, 0x4c}, "lzo", unlzo }, { {0x02, 0x21}, "lz4", unlz4 }, + { {0x28, 0xb5}, "zstd", unzstd }, { {0, 0}, NULL, NULL } }; diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c new file mode 100644 index 000000000000..0ad2c15479ed --- /dev/null +++ b/lib/decompress_unzstd.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Important notes about in-place decompression + * + * At 
least on x86, the kernel is decompressed in place: the compressed data + * is placed to the end of the output buffer, and the decompressor overwrites + * most of the compressed data. There must be enough safety margin to + * guarantee that the write position is always behind the read position. + * + * The safety margin for ZSTD with a 128 KB block size is calculated below. + * Note that the margin with ZSTD is bigger than with GZIP or XZ! + * + * The worst case for in-place decompression is that the beginning of + * the file is compressed extremely well, and the rest of the file is + * uncompressible. Thus, we must look for worst-case expansion when the + * compressor is encoding uncompressible data. + * + * The structure of the .zst file in case of a compresed kernel is as follows. + * Maximum sizes (as bytes) of the fields are in parenthesis. + * + * Frame Header: (18) + * Blocks: (N) + * Checksum: (4) + * + * The frame header and checksum overhead is at most 22 bytes. + * + * ZSTD stores the data in blocks. Each block has a header whose size is + * a 3 bytes. After the block header, there is up to 128 KB of payload. + * The maximum uncompressed size of the payload is 128 KB. The minimum + * uncompressed size of the payload is never less than the payload size + * (excluding the block header). + * + * The assumption, that the uncompressed size of the payload is never + * smaller than the payload itself, is valid only when talking about + * the payload as a whole. It is possible that the payload has parts where + * the decompressor consumes more input than it produces output. Calculating + * the worst case for this would be tricky. Instead of trying to do that, + * let's simply make sure that the decompressor never overwrites any bytes + * of the payload which it is currently reading. + * + * Now we have enough information to calculate the safety margin. We need + * - 22 bytes for the .zst file format headers; + * - 3 bytes per every 128 KiB of uncompressed size (one block header per + * block); and + * - 128 KiB (biggest possible zstd block size) to make sure that the + * decompressor never overwrites anything from the block it is currently + * reading. + * + * We get the following formula: + * + * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072 + * <= 22 + (uncompressed_size >> 15) + 131072 + */ + +/* + * Preboot environments #include "path/to/decompress_unzstd.c". + * All of the source files we depend on must be #included. + * zstd's only source dependeny is xxhash, which has no source + * dependencies. + * + * When UNZSTD_PREBOOT is defined we declare __decompress(), which is + * used for kernel decompression, instead of unzstd(). + * + * Define __DISABLE_EXPORTS in preboot environments to prevent symbols + * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro. + */ +#ifdef STATIC +# define UNZSTD_PREBOOT +# include "xxhash.c" +# include "zstd/entropy_common.c" +# include "zstd/fse_decompress.c" +# include "zstd/huf_decompress.c" +# include "zstd/zstd_common.c" +# include "zstd/decompress.c" +#endif + +#include <linux/decompress/mm.h> +#include <linux/kernel.h> +#include <linux/zstd.h> + +/* 128MB is the maximum window size supported by zstd. */ +#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX) +/* + * Size of the input and output buffers in multi-call mode. + * Pick a larger size because it isn't used during kernel decompression, + * since that is single pass, and we have to allocate a large buffer for + * zstd's window anyway. 
The larger size speeds up initramfs decompression. + */ +#define ZSTD_IOBUF_SIZE (1 << 17) + +static int INIT handle_zstd_error(size_t ret, void (*error)(char *x)) +{ + const int err = ZSTD_getErrorCode(ret); + + if (!ZSTD_isError(ret)) + return 0; + + switch (err) { + case ZSTD_error_memory_allocation: + error("ZSTD decompressor ran out of memory"); + break; + case ZSTD_error_prefix_unknown: + error("Input is not in the ZSTD format (wrong magic bytes)"); + break; + case ZSTD_error_dstSize_tooSmall: + case ZSTD_error_corruption_detected: + case ZSTD_error_checksum_wrong: + error("ZSTD-compressed data is corrupt"); + break; + default: + error("ZSTD-compressed data is probably corrupt"); + break; + } + return -1; +} + +/* + * Handle the case where we have the entire input and output in one segment. + * We can allocate less memory (no circular buffer for the sliding window), + * and avoid some memcpy() calls. + */ +static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf, + long out_len, long *in_pos, + void (*error)(char *x)) +{ + const size_t wksp_size = ZSTD_DCtxWorkspaceBound(); + void *wksp = large_malloc(wksp_size); + ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size); + int err; + size_t ret; + + if (dctx == NULL) { + error("Out of memory while allocating ZSTD_DCtx"); + err = -1; + goto out; + } + /* + * Find out how large the frame actually is, there may be junk at + * the end of the frame that ZSTD_decompressDCtx() can't handle. + */ + ret = ZSTD_findFrameCompressedSize(in_buf, in_len); + err = handle_zstd_error(ret, error); + if (err) + goto out; + in_len = (long)ret; + + ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len); + err = handle_zstd_error(ret, error); + if (err) + goto out; + + if (in_pos != NULL) + *in_pos = in_len; + + err = 0; +out: + if (wksp != NULL) + large_free(wksp); + return err; +} + +static int INIT __unzstd(unsigned char *in_buf, long in_len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *in_pos, + void (*error)(char *x)) +{ + ZSTD_inBuffer in; + ZSTD_outBuffer out; + ZSTD_frameParams params; + void *in_allocated = NULL; + void *out_allocated = NULL; + void *wksp = NULL; + size_t wksp_size; + ZSTD_DStream *dstream; + int err; + size_t ret; + + if (out_len == 0) + out_len = LONG_MAX; /* no limit */ + + if (fill == NULL && flush == NULL) + /* + * We can decompress faster and with less memory when we have a + * single chunk. + */ + return decompress_single(in_buf, in_len, out_buf, out_len, + in_pos, error); + + /* + * If in_buf is not provided, we must be using fill(), so allocate + * a large enough buffer. If it is provided, it must be at least + * ZSTD_IOBUF_SIZE large. + */ + if (in_buf == NULL) { + in_allocated = large_malloc(ZSTD_IOBUF_SIZE); + if (in_allocated == NULL) { + error("Out of memory while allocating input buffer"); + err = -1; + goto out; + } + in_buf = in_allocated; + in_len = 0; + } + /* Read the first chunk, since we need to decode the frame header. */ + if (fill != NULL) + in_len = fill(in_buf, ZSTD_IOBUF_SIZE); + if (in_len < 0) { + error("ZSTD-compressed data is truncated"); + err = -1; + goto out; + } + /* Set the first non-empty input buffer. */ + in.src = in_buf; + in.pos = 0; + in.size = in_len; + /* Allocate the output buffer if we are using flush(). 
*/ + if (flush != NULL) { + out_allocated = large_malloc(ZSTD_IOBUF_SIZE); + if (out_allocated == NULL) { + error("Out of memory while allocating output buffer"); + err = -1; + goto out; + } + out_buf = out_allocated; + out_len = ZSTD_IOBUF_SIZE; + } + /* Set the output buffer. */ + out.dst = out_buf; + out.pos = 0; + out.size = out_len; + + /* + * We need to know the window size to allocate the ZSTD_DStream. + * Since we are streaming, we need to allocate a buffer for the sliding + * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX + * (8 MB), so it is important to use the actual value so as not to + * waste memory when it is smaller. + */ + ret = ZSTD_getFrameParams(¶ms, in.src, in.size); + err = handle_zstd_error(ret, error); + if (err) + goto out; + if (ret != 0) { + error("ZSTD-compressed data has an incomplete frame header"); + err = -1; + goto out; + } + if (params.windowSize > ZSTD_WINDOWSIZE_MAX) { + error("ZSTD-compressed data has too large a window size"); + err = -1; + goto out; + } + + /* + * Allocate the ZSTD_DStream now that we know how much memory is + * required. + */ + wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize); + wksp = large_malloc(wksp_size); + dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size); + if (dstream == NULL) { + error("Out of memory while allocating ZSTD_DStream"); + err = -1; + goto out; + } + + /* + * Decompression loop: + * Read more data if necessary (error if no more data can be read). + * Call the decompression function, which returns 0 when finished. + * Flush any data produced if using flush(). + */ + if (in_pos != NULL) + *in_pos = 0; + do { + /* + * If we need to reload data, either we have fill() and can + * try to get more data, or we don't and the input is truncated. + */ + if (in.pos == in.size) { + if (in_pos != NULL) + *in_pos += in.pos; + in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1; + if (in_len < 0) { + error("ZSTD-compressed data is truncated"); + err = -1; + goto out; + } + in.pos = 0; + in.size = in_len; + } + /* Returns zero when the frame is complete. */ + ret = ZSTD_decompressStream(dstream, &out, &in); + err = handle_zstd_error(ret, error); + if (err) + goto out; + /* Flush all of the data produced if using flush(). 
*/ + if (flush != NULL && out.pos > 0) { + if (out.pos != flush(out.dst, out.pos)) { + error("Failed to flush()"); + err = -1; + goto out; + } + out.pos = 0; + } + } while (ret != 0); + + if (in_pos != NULL) + *in_pos += in.pos; + + err = 0; +out: + if (in_allocated != NULL) + large_free(in_allocated); + if (out_allocated != NULL) + large_free(out_allocated); + if (wksp != NULL) + large_free(wksp); + return err; +} + +#ifndef UNZSTD_PREBOOT +STATIC int INIT unzstd(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, + long *pos, + void (*error)(char *x)) +{ + return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error); +} +#else +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *pos, + void (*error)(char *x)) +{ + return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error); +} +#endif diff --git a/lib/math/div64.c b/lib/math/div64.c index 368ca7fd0d82..3952a07130d8 100644 --- a/lib/math/div64.c +++ b/lib/math/div64.c @@ -190,3 +190,44 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return __iter_div_u64_rem(dividend, divisor, remainder); } EXPORT_SYMBOL(iter_div_u64_rem); + +#ifndef mul_u64_u64_div_u64 +u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + /* can a * b overflow ? */ + if (ilog2(a) + ilog2(b) > 62) { + /* + * (b * a) / c is equal to + * + * (b / c) * a + + * (b % c) * a / c + * + * if nothing overflows. Can the 1st multiplication + * overflow? Yes, but we do not care: this can only + * happen if the end result can't fit in u64 anyway. + * + * So the code below does + * + * res = (b / c) * a; + * b = b % c; + */ + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile index d5874a7f5ff9..43b8fce14079 100644 --- a/lib/mpi/Makefile +++ b/lib/mpi/Makefile @@ -16,6 +16,7 @@ mpi-y = \ mpicoder.o \ mpi-bit.o \ mpi-cmp.o \ + mpi-sub-ui.o \ mpih-cmp.o \ mpih-div.o \ mpih-mul.o \ diff --git a/lib/mpi/mpi-sub-ui.c b/lib/mpi/mpi-sub-ui.c new file mode 100644 index 000000000000..b41b082b5f3e --- /dev/null +++ b/lib/mpi/mpi-sub-ui.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI. + * + * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015 + * Free Software Foundation, Inc. + * + * This file was based on the GNU MP Library source file: + * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h + * + * The GNU MP Library is free software; you can redistribute it and/or modify + * it under the terms of either: + * + * * the GNU Lesser General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your + * option) any later version. + * + * or + * + * * the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * or both in parallel, as here. 
+ * + * The GNU MP Library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received copies of the GNU General Public License and the + * GNU Lesser General Public License along with the GNU MP Library. If not, + * see https://www.gnu.org/licenses/. + */ + +#include "mpi-internal.h" + +int mpi_sub_ui(MPI w, MPI u, unsigned long vval) +{ + if (u->nlimbs == 0) { + if (mpi_resize(w, 1) < 0) + return -ENOMEM; + w->d[0] = vval; + w->nlimbs = (vval != 0); + w->sign = (vval != 0); + return 0; + } + + /* If not space for W (and possible carry), increase space. */ + if (mpi_resize(w, u->nlimbs + 1)) + return -ENOMEM; + + if (u->sign) { + mpi_limb_t cy; + + cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + w->d[u->nlimbs] = cy; + w->nlimbs = u->nlimbs + cy; + w->sign = 1; + } else { + /* The signs are different. Need exact comparison to determine + * which operand to subtract from which. + */ + if (u->nlimbs == 1 && u->d[0] < vval) { + w->d[0] = vval - u->d[0]; + w->nlimbs = 1; + w->sign = 1; + } else { + mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + /* Size can decrease with at most one limb. */ + w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0)); + w->sign = 0; + } + } + + mpi_normalize(w); + return 0; +} +EXPORT_SYMBOL_GPL(mpi_sub_ui); diff --git a/lib/packing.c b/lib/packing.c index 50d1e9f2f5a7..6ed72dccfdb5 100644 --- a/lib/packing.c +++ b/lib/packing.c @@ -73,6 +73,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit, * @endbit: The index (in logical notation, compensated for quirks) where * the packed value ends within pbuf. Must be smaller than, or equal * to, startbit. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. * @op: If PACK, then uval will be treated as const pointer and copied (packed) * into pbuf, between startbit and endbit. * If UNPACK, then pbuf will be treated as const pointer and the logical diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 34e406fe561f..8e4a3a4397f2 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1029,7 +1029,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, { struct radix_tree_node *node, *parent; unsigned long maxindex; - int uninitialized_var(offset); + int offset; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) diff --git a/lib/random32.c b/lib/random32.c index 763b920a6206..3d749abb9e80 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state); /** * prandom_u32_state - seeded pseudo-random number generator. 
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9f6890aedd1a..c949c1e3b87c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -31,7 +31,7 @@ union nested_table { union nested_table __rcu *table; - struct rhash_lock_head *bucket; + struct rhash_lock_head __rcu *bucket; }; static u32 head_hashfn(struct rhashtable *ht, @@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, } static int rhashtable_rehash_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); @@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); - struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash); + struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); int err; if (!bkt) @@ -485,7 +485,7 @@ fail: } static void *rhashtable_lookup_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) { @@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, return ERR_PTR(-ENOENT); } -static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, - struct bucket_table *tbl, - unsigned int hash, - struct rhash_head *obj, - void *data) +static struct bucket_table *rhashtable_insert_one( + struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, + struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, + void *data) { struct bucket_table *new_tbl; struct rhash_head *head; @@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, { struct bucket_table *new_tbl; struct bucket_table *tbl; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; unsigned int hash; void *data; @@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht) } EXPORT_SYMBOL_GPL(rhashtable_destroy); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); @@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(__rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { - static struct rhash_lock_head *rhnull; + static struct rhash_lock_head __rcu *rhnull; if (!rhnull) INIT_RHT_NULLS_HEAD(rhnull); @@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index af88d1346dd7..267aa7709416 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c 
@@ -292,8 +292,11 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m) for (i = 0; i < sb->map_nr; i++) { unsigned long word = READ_ONCE(sb->map[i].word); + unsigned long cleared = READ_ONCE(sb->map[i].cleared); unsigned int word_bits = READ_ONCE(sb->map[i].depth); + word &= ~cleared; + while (word_bits > 0) { unsigned int bits = min(8 - byte_bits, word_bits); diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 4e865d42ab03..707453f5d58e 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -91,6 +91,7 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) return ret; } +EXPORT_SYMBOL_GPL(seq_buf_printf); #ifdef CONFIG_BINARY_PRINTF /** diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c index 25b5cbfb7615..10360d4ea273 100644 --- a/lib/test-string_helpers.c +++ b/lib/test-string_helpers.c @@ -238,6 +238,28 @@ static const struct test_string_2 escape1[] __initconst = {{ /* terminator */ }}; +static const struct test_string strings_upper[] __initconst = { + { + .in = "abcdefgh1234567890test", + .out = "ABCDEFGH1234567890TEST", + }, + { + .in = "abCdeFgH1234567890TesT", + .out = "ABCDEFGH1234567890TEST", + }, +}; + +static const struct test_string strings_lower[] __initconst = { + { + .in = "ABCDEFGH1234567890TEST", + .out = "abcdefgh1234567890test", + }, + { + .in = "abCdeFgH1234567890TesT", + .out = "abcdefgh1234567890test", + }, +}; + static __init const char *test_string_find_match(const struct test_string_2 *s2, unsigned int flags) { @@ -390,6 +412,48 @@ static __init void test_string_get_size(void) test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB"); } +static void __init test_string_upper_lower(void) +{ + char *dst; + int i; + + for (i = 0; i < ARRAY_SIZE(strings_upper); i++) { + const char *s = strings_upper[i].in; + int len = strlen(strings_upper[i].in) + 1; + + dst = kmalloc(len, GFP_KERNEL); + if (!dst) + return; + + string_upper(dst, s); + if (memcmp(dst, strings_upper[i].out, len)) { + pr_warn("Test 'string_upper' failed : expected %s, got %s!\n", + strings_upper[i].out, dst); + kfree(dst); + return; + } + kfree(dst); + } + + for (i = 0; i < ARRAY_SIZE(strings_lower); i++) { + const char *s = strings_lower[i].in; + int len = strlen(strings_lower[i].in) + 1; + + dst = kmalloc(len, GFP_KERNEL); + if (!dst) + return; + + string_lower(dst, s); + if (memcmp(dst, strings_lower[i].out, len)) { + pr_warn("Test 'string_lower failed : : expected %s, got %s!\n", + strings_lower[i].out, dst); + kfree(dst); + return; + } + kfree(dst); + } +} + static int __init test_string_helpers_init(void) { unsigned int i; @@ -411,6 +475,9 @@ static int __init test_string_helpers_init(void) /* Test string_get_size() */ test_string_get_size(); + /* Test string upper(), string_lower() */ + test_string_upper_lower(); + return -EINVAL; } module_init(test_string_helpers_init); diff --git a/lib/test_fpu.c b/lib/test_fpu.c new file mode 100644 index 000000000000..c33764aa3eb8 --- /dev/null +++ b/lib/test_fpu.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Test cases for using floating point operations inside a kernel module. + * + * This tests kernel_fpu_begin() and kernel_fpu_end() functions, especially + * when userland has modified the floating point control registers. The kernel + * state might depend on the state set by the userland thread that was active + * before a syscall. + * + * To facilitate the test, this module registers file + * /sys/kernel/debug/selftest_helpers/test_fpu, which when read causes a + * sequence of floating point operations. 
If the operations fail, either the + * read returns error status or the kernel crashes. + * If the operations succeed, the read returns "1\n". + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <asm/fpu/api.h> + +static int test_fpu(void) +{ + /* + * This sequence of operations tests that rounding mode is + * to nearest and that denormal numbers are supported. + * Volatile variables are used to avoid compiler optimizing + * the calculations away. + */ + volatile double a, b, c, d, e, f, g; + + a = 4.0; + b = 1e-15; + c = 1e-310; + + /* Sets precision flag */ + d = a + b; + + /* Result depends on rounding mode */ + e = a + b / 2; + + /* Denormal and very large values */ + f = b / c; + + /* Depends on denormal support */ + g = a + c * f; + + if (d > a && e > a && g > a) + return 0; + else + return -EINVAL; +} + +static int test_fpu_get(void *data, u64 *val) +{ + int status = -EINVAL; + + kernel_fpu_begin(); + status = test_fpu(); + kernel_fpu_end(); + + *val = 1; + return status; +} + +DEFINE_SIMPLE_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n"); +static struct dentry *selftest_dir; + +static int __init test_fpu_init(void) +{ + selftest_dir = debugfs_create_dir("selftest_helpers", NULL); + if (!selftest_dir) + return -ENOMEM; + + debugfs_create_file("test_fpu", 0444, selftest_dir, NULL, + &test_fpu_fops); + + return 0; +} + +static void __exit test_fpu_exit(void) +{ + debugfs_remove(selftest_dir); +} + +module_init(test_fpu_init); +module_exit(test_fpu_exit); + +MODULE_LICENSE("GPL"); diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 28528285942c..a2a82262b97b 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -520,8 +520,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, err_free: kfree(devmem); err_release: - release_mem_region(devmem->pagemap.res.start, - resource_size(&devmem->pagemap.res)); + release_mem_region(res->start, resource_size(res)); err: mutex_unlock(&mdevice->devmem_lock); return false; diff --git a/lib/test_lockup.c b/lib/test_lockup.c index f258743a0d83..ff26f36d729f 100644 --- a/lib/test_lockup.c +++ b/lib/test_lockup.c @@ -168,7 +168,7 @@ static int master_cpu; static void test_lock(bool master, bool verbose) { - u64 uninitialized_var(wait_start); + u64 wait_start; if (measure_lock_wait) wait_start = local_clock(); @@ -419,8 +419,8 @@ static bool test_kernel_ptr(unsigned long addr, int size) /* should be at least readable kernel address */ if (access_ok(ptr, 1) || access_ok(ptr + size - 1, 1) || - probe_kernel_address(ptr, buf) || - probe_kernel_address(ptr + size - 1, buf)) { + get_kernel_nofault(buf, ptr) || + get_kernel_nofault(buf, ptr + size - 1)) { pr_err("invalid kernel ptr: %#lx\n", addr); return true; } @@ -437,7 +437,7 @@ static bool __maybe_unused test_magic(unsigned long addr, int offset, if (!addr) return false; - if (probe_kernel_address(ptr, magic) || magic != expected) { + if (get_kernel_nofault(magic, ptr) || magic != expected) { pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n", addr, offset, magic, expected); return true; diff --git a/lib/test_objagg.c b/lib/test_objagg.c index 72c1abfa154d..da137939a410 100644 --- a/lib/test_objagg.c +++ b/lib/test_objagg.c @@ -979,10 +979,10 @@ err_check_expect_stats2: err_world2_obj_get: for (i--; i >= 0; i--) world_obj_put(&world2, objagg, hints_case->key_ids[i]); - objagg_hints_put(hints); - objagg_destroy(objagg2); i = hints_case->key_ids_count; + objagg_destroy(objagg2); err_check_expect_hints_stats: + 
objagg_hints_put(hints); err_hints_get: err_check_expect_stats: err_world_obj_get: diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index ddc9685702b1..5cf2fe9aab9e 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -15,6 +15,8 @@ #include <linux/delay.h> #include <linux/rwsem.h> #include <linux/mm.h> +#include <linux/rcupdate.h> +#include <linux/slab.h> #define __param(type, name, init, msg) \ static type name = init; \ @@ -35,14 +37,18 @@ __param(int, test_loop_count, 1000000, __param(int, run_test_mask, INT_MAX, "Set tests specified in the mask.\n\n" - "\t\tid: 1, name: fix_size_alloc_test\n" - "\t\tid: 2, name: full_fit_alloc_test\n" - "\t\tid: 4, name: long_busy_list_alloc_test\n" - "\t\tid: 8, name: random_size_alloc_test\n" - "\t\tid: 16, name: fix_align_alloc_test\n" - "\t\tid: 32, name: random_size_align_alloc_test\n" - "\t\tid: 64, name: align_shift_alloc_test\n" - "\t\tid: 128, name: pcpu_alloc_test\n" + "\t\tid: 1, name: fix_size_alloc_test\n" + "\t\tid: 2, name: full_fit_alloc_test\n" + "\t\tid: 4, name: long_busy_list_alloc_test\n" + "\t\tid: 8, name: random_size_alloc_test\n" + "\t\tid: 16, name: fix_align_alloc_test\n" + "\t\tid: 32, name: random_size_align_alloc_test\n" + "\t\tid: 64, name: align_shift_alloc_test\n" + "\t\tid: 128, name: pcpu_alloc_test\n" + "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" + "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" + "\t\tid: 1024, name: kvfree_rcu_1_arg_slab_test\n" + "\t\tid: 2048, name: kvfree_rcu_2_arg_slab_test\n" /* Add a new test case description here. */ ); @@ -316,6 +322,83 @@ pcpu_alloc_test(void) return rv; } +struct test_kvfree_rcu { + struct rcu_head rcu; + unsigned char array[20]; +}; + +static int +kvfree_rcu_1_arg_vmalloc_test(void) +{ + struct test_kvfree_rcu *p; + int i; + + for (i = 0; i < test_loop_count; i++) { + p = vmalloc(1 * PAGE_SIZE); + if (!p) + return -1; + + p->array[0] = 'a'; + kvfree_rcu(p); + } + + return 0; +} + +static int +kvfree_rcu_2_arg_vmalloc_test(void) +{ + struct test_kvfree_rcu *p; + int i; + + for (i = 0; i < test_loop_count; i++) { + p = vmalloc(1 * PAGE_SIZE); + if (!p) + return -1; + + p->array[0] = 'a'; + kvfree_rcu(p, rcu); + } + + return 0; +} + +static int +kvfree_rcu_1_arg_slab_test(void) +{ + struct test_kvfree_rcu *p; + int i; + + for (i = 0; i < test_loop_count; i++) { + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -1; + + p->array[0] = 'a'; + kvfree_rcu(p); + } + + return 0; +} + +static int +kvfree_rcu_2_arg_slab_test(void) +{ + struct test_kvfree_rcu *p; + int i; + + for (i = 0; i < test_loop_count; i++) { + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -1; + + p->array[0] = 'a'; + kvfree_rcu(p, rcu); + } + + return 0; +} + struct test_case_desc { const char *test_name; int (*test_func)(void); @@ -330,6 +413,10 @@ static struct test_case_desc test_case_array[] = { { "random_size_align_alloc_test", random_size_align_alloc_test }, { "align_shift_alloc_test", align_shift_alloc_test }, { "pcpu_alloc_test", pcpu_alloc_test }, + { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, + { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, + { "kvfree_rcu_1_arg_slab_test", kvfree_rcu_1_arg_slab_test }, + { "kvfree_rcu_2_arg_slab_test", kvfree_rcu_2_arg_slab_test }, /* Add a new test case here. 
*/ }; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 259e55895933..c155769559ab 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -381,6 +381,9 @@ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width) #define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */ #define SPECIAL 64 /* prefix hex with "0x", octal with "0" */ +static_assert(ZEROPAD == ('0' - ' ')); +static_assert(SMALL == ' '); + enum format_type { FORMAT_TYPE_NONE, /* Just a string part */ FORMAT_TYPE_WIDTH, @@ -507,7 +510,7 @@ char *number(char *buf, char *end, unsigned long long num, /* zero or space padding */ if (!(spec.flags & LEFT)) { char c = ' ' + (spec.flags & ZEROPAD); - BUILD_BUG_ON(' ' + ZEROPAD != '0'); + while (--field_width >= 0) { if (buf < end) *buf = c; @@ -1934,7 +1937,7 @@ char *flags_string(char *buf, char *end, void *flags_ptr, names = vmaflag_names; break; case 'g': - flags = *(gfp_t *)flags_ptr; + flags = (__force unsigned long)(*(gfp_t *)flags_ptr); names = gfpflag_names; break; default: @@ -1976,12 +1979,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, char *buf_start = buf; struct property *prop; bool has_mult, pass; - static const struct printf_spec num_spec = { - .flags = SMALL, - .field_width = -1, - .precision = -1, - .base = 10, - }; struct printf_spec str_spec = spec; str_spec.field_width = -1; @@ -2021,7 +2018,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, str_spec.precision = precision; break; case 'p': /* phandle */ - buf = number(buf, end, (unsigned int)dn->phandle, num_spec); + buf = number(buf, end, (unsigned int)dn->phandle, default_dec_spec); break; case 'P': /* path-spec */ p = fwnode_get_name(of_fwnode_handle(dn)); @@ -2134,7 +2131,7 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode, * [4] or [6] and is able to print port [p], flowinfo [f], scope [s] * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order * - 'I[6S]c' for IPv6 addresses printed as specified by - * http://tools.ietf.org/html/rfc5952 + * https://tools.ietf.org/html/rfc5952 * - 'E[achnops]' For an escaped buffer, where rules are defined by combination * of the following flags (see string_escape_mem() for the * details): diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c index a84300e5a013..0b353530fb3f 100644 --- a/lib/zstd/fse_decompress.c +++ b/lib/zstd/fse_decompress.c @@ -47,6 +47,7 @@ ****************************************************************/ #include "bitstream.h" #include "fse.h" +#include "zstd_internal.h" #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/string.h> /* memcpy, memset */ @@ -60,14 +61,6 @@ enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ } /* use only *after* variable declarations */ -/* check and forward error code */ -#define CHECK_F(f) \ - { \ - size_t const e = f; \ - if (FSE_isError(e)) \ - return e; \ - } - /* ************************************************************** * Templates ****************************************************************/ diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h index 1a79fab9e13a..dac753397f86 100644 --- a/lib/zstd/zstd_internal.h +++ b/lib/zstd/zstd_internal.h @@ -127,7 +127,14 @@ static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; * Shared functions to include for inlining *********************************************/ ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) { - memcpy(dst, src, 8); + /* + * zstd relies heavily on gcc being able to 
analyze and inline this + * memcpy() call, since it is called in a tight loop. Preboot mode + * is compiled in freestanding mode, which stops gcc from analyzing + * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a + * regular memcpy(). + */ + __builtin_memcpy(dst, src, 8); } /*! ZSTD_wildcopy() : * custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ @@ -137,13 +144,16 @@ ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length) const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; BYTE* const oend = op + length; - /* Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. +#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200 + /* + * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388. * Avoid the bad case where the loop only runs once by handling the * special case separately. This doesn't trigger the bug because it * doesn't involve pointer/integer overflow. */ if (length <= 8) return ZSTD_copy8(dst, src); +#endif do { ZSTD_copy8(op, ip); op += 8; |
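
The new lib/decompress_unzstd.c comment above derives the in-place decompression safety margin as safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072, bounded by 22 + (uncompressed_size >> 15) + 131072. As a rough sanity check, the stand-alone user-space sketch below evaluates both expressions for an example image size; it is not part of the patch, and the helper names and the 64 MiB sample size are illustrative only.

    #include <stdio.h>
    #include <stdint.h>

    /* Exact margin from the comment: 22 bytes of frame header plus checksum,
     * 3 bytes of block header per 128 KiB block, plus one full 128 KiB block
     * of look-behind. */
    static uint64_t zstd_margin_exact(uint64_t uncompressed_size)
    {
            return 22 + uncompressed_size * 3 / 131072 + 131072;
    }

    /* Simplified upper bound quoted in the comment. */
    static uint64_t zstd_margin_bound(uint64_t uncompressed_size)
    {
            return 22 + (uncompressed_size >> 15) + 131072;
    }

    int main(void)
    {
            uint64_t size = 64ULL << 20;    /* assume a 64 MiB decompressed kernel */

            printf("exact margin: %llu bytes\n",
                   (unsigned long long)zstd_margin_exact(size));
            printf("upper bound : %llu bytes\n",
                   (unsigned long long)zstd_margin_bound(size));
            return 0;
    }

For a 64 MiB image this works out to roughly 130 KiB of margin, i.e. the single 128 KiB look-behind block dominates the header and per-block overhead.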
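Similarly, the mul_u64_u64_div_u64() fallback added to lib/math/div64.c is built on the identity (a * b) / c == (b / c) * a + ((b % c) * a) / c. The sketch below, again not part of the patch, checks that identity against an unsigned __int128 reference (a GCC/Clang extension that the kernel fallback deliberately avoids); it omits the precision-dropping shift the real code applies when (b % c) * a would itself overflow, so the sample inputs are chosen to keep that product within 64 bits.

    #include <stdio.h>
    #include <stdint.h>

    /* Split computation: avoids forming the full 128-bit product a * b. */
    static uint64_t split_mul_div(uint64_t a, uint64_t b, uint64_t c)
    {
            uint64_t q = b / c;
            uint64_t r = b % c;

            return q * a + (r * a) / c;
    }

    int main(void)
    {
            /* a * b overflows 64 bits here, but (b % c) * a still fits. */
            uint64_t a = 10000000000ULL;
            uint64_t b = 10000000000ULL;
            uint64_t c = 3000000000ULL;

            uint64_t ref = (uint64_t)((unsigned __int128)a * b / c);

            printf("reference  : %llu\n", (unsigned long long)ref);
            printf("split form : %llu\n",
                   (unsigned long long)split_mul_div(a, b, c));
            return 0;
    }

Both paths print 33333333333 for these inputs, matching the floor of (a * b) / c even though the intermediate product exceeds 64 bits.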