Diffstat (limited to 'lib')
46 files changed, 2586 insertions, 409 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 5d53f9609c25..df3f3da95990 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -19,6 +19,9 @@ config RAID6_PQ_BENCHMARK Benchmark all available RAID6 PQ functions on init and choose the fastest one. +config LINEAR_RANGES + tristate + config PACKING bool "Generic bitfield packing and unpacking" default n @@ -80,6 +83,9 @@ config ARCH_USE_CMPXCHG_LOCKREF config ARCH_HAS_FAST_MULTIPLIER bool +config ARCH_USE_SYM_ANNOTATIONS + bool + config INDIRECT_PIO bool "Access I/O in non-MMIO mode" depends on ARM64 @@ -427,7 +433,7 @@ config INTERVAL_TREE See: - Documentation/rbtree.txt + Documentation/core-api/rbtree.rst for more information. diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 971d6202c93a..90e16bfe27c7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -99,6 +99,7 @@ config DYNAMIC_DEBUG default n depends on PRINTK depends on (DEBUG_FS || PROC_FS) + select DYNAMIC_DEBUG_CORE help Compiles debug level messages into the kernel, which would not @@ -165,6 +166,17 @@ config DYNAMIC_DEBUG See Documentation/admin-guide/dynamic-debug-howto.rst for additional information. +config DYNAMIC_DEBUG_CORE + bool "Enable core function of dynamic debug support" + depends on PRINTK + depends on (DEBUG_FS || PROC_FS) + help + Enable core functional support of dynamic debug. It is useful + when you want to tie dynamic debug to your kernel modules with + DYNAMIC_DEBUG_MODULE defined for each of them, especially for + the case of embedded system where the kernel image size is + sensitive for people. + config SYMBOLIC_ERRNAME bool "Support symbolic error names in printf" default y if PRINTK @@ -213,6 +225,23 @@ config DEBUG_INFO_REDUCED DEBUG_INFO build and compile times are reduced too. Only works with newer gcc versions. +config DEBUG_INFO_COMPRESSED + bool "Compressed debugging information" + depends on DEBUG_INFO + depends on $(cc-option,-gz=zlib) + depends on $(as-option,-Wa$(comma)--compress-debug-sections=zlib) + depends on $(ld-option,--compress-debug-sections=zlib) + help + Compress the debug information using zlib. Requires GCC 5.0+ or Clang + 5.0+, binutils 2.26+, and zlib. + + Users of dpkg-deb via scripts/package/builddeb may find an increase in + size of their debug .deb packages with this config set, due to the + debug info being compressed with zlib, then the object files being + recompressed with a different compression scheme. But this is still + preferable to setting $KDEB_COMPRESS to "none" which would be even + larger. + config DEBUG_INFO_SPLIT bool "Produce split debuginfo in .dwo files" depends on DEBUG_INFO @@ -242,6 +271,8 @@ config DEBUG_INFO_DWARF4 config DEBUG_INFO_BTF bool "Generate BTF typeinfo" depends on DEBUG_INFO + depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED + depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST help Generate deduplicated BTF type information from DWARF debug info. Turning this on expects presence of pahole tool, which will convert @@ -367,6 +398,11 @@ config STACK_VALIDATION For more information, see tools/objtool/Documentation/stack-validation.txt. +config VMLINUX_VALIDATION + bool + depends on STACK_VALIDATION && DEBUG_ENTRY && !PARAVIRT + default y + config DEBUG_FORCE_WEAK_PER_CPU bool "Force weak per-cpu definitions" depends on DEBUG_KERNEL @@ -651,6 +687,12 @@ config SCHED_STACK_END_CHECK data corruption or a sporadic crash at a later stage once the region is examined. The runtime overhead introduced is minimal. 
+config ARCH_HAS_DEBUG_VM_PGTABLE + bool + help + An architecture should select this when it can successfully + build and run DEBUG_VM_PGTABLE. + config DEBUG_VM bool "Debug VM" depends on DEBUG_KERNEL @@ -686,6 +728,22 @@ config DEBUG_VM_PGFLAGS If unsure, say N. +config DEBUG_VM_PGTABLE + bool "Debug arch page table for semantics compliance" + depends on MMU + depends on ARCH_HAS_DEBUG_VM_PGTABLE + default y if DEBUG_VM + help + This option provides a debug method which can be used to test + architecture page table helper functions on various platforms in + verifying if they comply with expected generic MM semantics. This + will help architecture code in making sure that any changes or + new additions of these helpers still conform to expected + semantics of the generic MM. Platforms will have to opt in for + this through ARCH_HAS_DEBUG_VM_PGTABLE. + + If unsure, say N. + config ARCH_HAS_DEBUG_VIRTUAL bool @@ -1508,7 +1566,7 @@ config PROVIDE_OHCI1394_DMA_INIT This code (~1k) is freed after boot. By then, the firewire stack in charge of the OHCI-1394 controllers should be used instead. - See Documentation/debugging-via-ohci1394.txt for more information. + See Documentation/core-api/debugging-via-ohci1394.rst for more information. source "samples/Kconfig" @@ -1769,6 +1827,15 @@ config KCOV_INSTRUMENT_ALL filesystem fuzzing with AFL) then you will want to enable coverage for more specific subsets of files, and should say n here. +config KCOV_IRQ_AREA_SIZE + hex "Size of interrupt coverage collection area in words" + depends on KCOV + default 0x40000 + help + KCOV uses preallocated per-cpu areas to collect coverage from + soft interrupts. This specifies the size of those areas in the + number of unsigned long words. + menuconfig RUNTIME_TESTING_MENU bool "Runtime Testing" def_bool y @@ -1986,6 +2053,19 @@ config TEST_LKM If unsure, say N. +config TEST_BITOPS + tristate "Test module for compilation of clear_bit/set_bit operations" + depends on m + help + This builds the "test_bitops" module that is much like the + TEST_LKM module except that it does a basic exercise of the + clear_bit and set_bit macros to make sure there are no compiler + warnings from C=1 sparse checker or -Wextra compilations. It has + no dependencies and doesn't run or load unless explicitly requested + by name. for example: modprobe test_bitops. + + If unsure, say N. + config TEST_VMALLOC tristate "Test module for stress/performance analysis of vmalloc allocator" default n @@ -2064,8 +2144,9 @@ config TEST_SYSCTL If unsure, say N. config SYSCTL_KUNIT_TEST - tristate "KUnit test for sysctl" + tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the proc sysctl unit test, which runs on boot. Tests the API contract and implementation correctness of sysctl. @@ -2075,8 +2156,9 @@ config SYSCTL_KUNIT_TEST If unsure, say N. config LIST_KUNIT_TEST - tristate "KUnit Test for Kernel Linked-list structures" + tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the linked list KUnit test suite. It tests that the API and basic functionality of the list_head type @@ -2092,6 +2174,18 @@ config LIST_KUNIT_TEST If unsure, say N. +config LINEAR_RANGES_TEST + tristate "KUnit test for linear_ranges" + depends on KUNIT + select LINEAR_RANGES + help + This builds the linear_ranges unit test, which runs on boot. + Tests the linear_ranges logic correctness. 
+ For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + config TEST_UDELAY tristate "udelay test driver" help @@ -2201,6 +2295,19 @@ config TEST_MEMINIT If unsure, say N. +config TEST_HMM + tristate "Test HMM (Heterogeneous Memory Management)" + depends on TRANSPARENT_HUGEPAGE + depends on DEVICE_PRIVATE + select HMM_MIRROR + select MMU_NOTIFIER + help + This is a pseudo device driver solely for testing HMM. + Say M here if you want to build the HMM test module. + Doing so will allow you to run tools/testing/selftest/vm/hmm-tests. + + If unsure, say N. + endif # RUNTIME_TESTING_MENU config MEMTEST diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 933680b59e2d..ffa7a76de086 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -124,4 +124,22 @@ config KDB_CONTINUE_CATASTROPHIC CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot. If you are not sure, say 0. +config ARCH_HAS_EARLY_DEBUG + bool + default n + help + If an architecture can definitely handle entering the debugger + when early_param's are parsed then it select this config. + Otherwise, if "kgdbwait" is passed on the kernel command line it + won't actually be processed until dbg_late_init() just after the + call to kgdb_arch_late() is made. + + NOTE: Even if this isn't selected by an architecture we will + still try to register kgdb to handle breakpoints and crashes + when early_param's are parsed, we just won't act on the + "kgdbwait" parameter until dbg_late_init(). If you get a + crash and try to drop into kgdb somewhere between these two + places you might or might not end up being able to use kgdb + depending on exactly how far along the architecture has initted. + endif # KGDB diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 48469c95d78e..27bcc2568c95 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -60,18 +60,15 @@ config UBSAN_SANITIZE_ALL Enabling this option will get kernel image size increased significantly. -config UBSAN_NO_ALIGNMENT - bool "Disable checking of pointers alignment" - default y if HAVE_EFFICIENT_UNALIGNED_ACCESS +config UBSAN_ALIGNMENT + bool "Enable checks for pointers alignment" + default !HAVE_EFFICIENT_UNALIGNED_ACCESS + depends on !UBSAN_TRAP help - This option disables the check of unaligned memory accesses. - This option should be used when building allmodconfig. - Disabling this option on architectures that support unaligned + This option enables the check of unaligned memory accesses. + Enabling this option on architectures that support unaligned accesses may produce a lot of false positives. 
-config UBSAN_ALIGNMENT - def_bool !UBSAN_NO_ALIGNMENT - config TEST_UBSAN tristate "Module for testing for undefined behavior detection" depends on m diff --git a/lib/Makefile b/lib/Makefile index ab68a8674360..b1c42c10073b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -59,6 +59,8 @@ obj-y += kstrtox.o obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o +obj-$(CONFIG_TEST_BITOPS) += test_bitops.o +CFLAGS_test_bitops.o += -Werror obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o obj-$(CONFIG_TEST_IDA) += test_ida.o @@ -95,6 +97,7 @@ obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o +obj-$(CONFIG_TEST_HMM) += test_hmm.o obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ @@ -128,6 +131,7 @@ obj-$(CONFIG_DEBUG_LIST) += list_debug.o obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o obj-$(CONFIG_BITREVERSE) += bitrev.o +obj-$(CONFIG_LINEAR_RANGES) += linear_ranges.o obj-$(CONFIG_PACKING) += packing.o obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o obj-$(CONFIG_CRC16) += crc16.o @@ -189,7 +193,7 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o -obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o +obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o obj-$(CONFIG_NLATTR) += nlattr.o @@ -313,3 +317,4 @@ obj-$(CONFIG_OBJAGG) += objagg.o # KUnit tests obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o +obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o diff --git a/lib/bch.c b/lib/bch.c index 052d3fb753a0..7c031ee8b93b 100644 --- a/lib/bch.c +++ b/lib/bch.c @@ -23,15 +23,15 @@ * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. * - * Call init_bch to get a pointer to a newly allocated bch_control structure for + * Call bch_init to get a pointer to a newly allocated bch_control structure for * the given m (Galois field order), t (error correction capability) and * (optional) primitive polynomial parameters. * - * Call encode_bch to compute and store ecc parity bytes to a given buffer. - * Call decode_bch to detect and locate errors in received data. + * Call bch_encode to compute and store ecc parity bytes to a given buffer. + * Call bch_decode to detect and locate errors in received data. * * On systems supporting hw BCH features, intermediate results may be provided - * to decode_bch in order to skip certain steps. See decode_bch() documentation + * to bch_decode in order to skip certain steps. See bch_decode() documentation * for details. 
* * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of @@ -114,10 +114,53 @@ struct gf_poly_deg1 { unsigned int c[2]; }; +static u8 swap_bits_table[] = { + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +}; + +static u8 swap_bits(struct bch_control *bch, u8 in) +{ + if (!bch->swap_bits) + return in; + + return swap_bits_table[in]; +} + /* - * same as encode_bch(), but process input data one byte at a time + * same as bch_encode(), but process input data one byte at a time */ -static void encode_bch_unaligned(struct bch_control *bch, +static void bch_encode_unaligned(struct bch_control *bch, const unsigned char *data, unsigned int len, uint32_t *ecc) { @@ -126,7 +169,9 @@ static void encode_bch_unaligned(struct bch_control *bch, const int l = BCH_ECC_WORDS(bch)-1; while (len--) { - p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff); + u8 tmp = swap_bits(bch, *data++); + + p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff); for (i = 0; i < l; i++) ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++); @@ -145,10 +190,16 @@ static void load_ecc8(struct bch_control *bch, uint32_t *dst, unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++, src += 4) - dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3]; + dst[i] = ((u32)swap_bits(bch, src[0]) << 24) | + ((u32)swap_bits(bch, src[1]) << 16) | + ((u32)swap_bits(bch, src[2]) << 8) | + swap_bits(bch, src[3]); memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords); - dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3]; + dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) | + ((u32)swap_bits(bch, pad[1]) << 16) | + ((u32)swap_bits(bch, pad[2]) << 8) | + swap_bits(bch, pad[3]); } /* @@ -161,20 +212,20 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst, unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++) { - *dst++ = (src[i] >> 24); - *dst++ = (src[i] >> 16) & 0xff; - *dst++ = (src[i] >> 8) & 0xff; - *dst++ = (src[i] >> 0) & 0xff; + 
*dst++ = swap_bits(bch, src[i] >> 24); + *dst++ = swap_bits(bch, src[i] >> 16); + *dst++ = swap_bits(bch, src[i] >> 8); + *dst++ = swap_bits(bch, src[i]); } - pad[0] = (src[nwords] >> 24); - pad[1] = (src[nwords] >> 16) & 0xff; - pad[2] = (src[nwords] >> 8) & 0xff; - pad[3] = (src[nwords] >> 0) & 0xff; + pad[0] = swap_bits(bch, src[nwords] >> 24); + pad[1] = swap_bits(bch, src[nwords] >> 16); + pad[2] = swap_bits(bch, src[nwords] >> 8); + pad[3] = swap_bits(bch, src[nwords]); memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords); } /** - * encode_bch - calculate BCH ecc parity of data + * bch_encode - calculate BCH ecc parity of data * @bch: BCH control structure * @data: data to encode * @len: data length in bytes @@ -187,7 +238,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst, * The exact number of computed ecc parity bits is given by member @ecc_bits of * @bch; it may be less than m*t for large values of t. */ -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc) { const unsigned int l = BCH_ECC_WORDS(bch)-1; @@ -215,7 +266,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, m = ((unsigned long)data) & 3; if (m) { mlen = (len < (4-m)) ? len : 4-m; - encode_bch_unaligned(bch, data, mlen, bch->ecc_buf); + bch_encode_unaligned(bch, data, mlen, bch->ecc_buf); data += mlen; len -= mlen; } @@ -240,7 +291,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, */ while (mlen--) { /* input data is read in big-endian format */ - w = r[0]^cpu_to_be32(*pdata++); + w = cpu_to_be32(*pdata++); + if (bch->swap_bits) + w = (u32)swap_bits(bch, w) | + ((u32)swap_bits(bch, w >> 8) << 8) | + ((u32)swap_bits(bch, w >> 16) << 16) | + ((u32)swap_bits(bch, w >> 24) << 24); + w ^= r[0]; p0 = tab0 + (l+1)*((w >> 0) & 0xff); p1 = tab1 + (l+1)*((w >> 8) & 0xff); p2 = tab2 + (l+1)*((w >> 16) & 0xff); @@ -255,13 +312,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, /* process last unaligned bytes */ if (len) - encode_bch_unaligned(bch, data, len, bch->ecc_buf); + bch_encode_unaligned(bch, data, len, bch->ecc_buf); /* store ecc parity bytes into original parity buffer */ if (ecc) store_ecc8(bch, ecc, bch->ecc_buf); } -EXPORT_SYMBOL_GPL(encode_bch); +EXPORT_SYMBOL_GPL(bch_encode); static inline int modulo(struct bch_control *bch, unsigned int v) { @@ -952,7 +1009,7 @@ static int chien_search(struct bch_control *bch, unsigned int len, #endif /* USE_CHIEN_SEARCH */ /** - * decode_bch - decode received codeword and find bit error locations + * bch_decode - decode received codeword and find bit error locations * @bch: BCH control structure * @data: received data, ignored if @calc_ecc is provided * @len: data length in bytes, must always be provided @@ -966,22 +1023,22 @@ static int chien_search(struct bch_control *bch, unsigned int len, * invalid parameters were provided * * Depending on the available hw BCH support and the need to compute @calc_ecc - * separately (using encode_bch()), this function should be called with one of + * separately (using bch_encode()), this function should be called with one of * the following parameter configurations - * * by providing @data and @recv_ecc only: - * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) + * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) * * by providing @recv_ecc and @calc_ecc: - * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) + * bch_decode(@bch, NULL, @len, 
@recv_ecc, @calc_ecc, NULL, @errloc) * * by providing ecc = recv_ecc XOR calc_ecc: - * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc) + * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc) * * by providing syndrome results @syn: - * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc) + * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc) * - * Once decode_bch() has successfully returned with a positive value, error + * Once bch_decode() has successfully returned with a positive value, error * locations returned in array @errloc should be interpreted as follows - * * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for @@ -993,7 +1050,7 @@ static int chien_search(struct bch_control *bch, unsigned int len, * Note that this function does not perform any data correction by itself, it * merely indicates error locations. */ -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc) { @@ -1012,7 +1069,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, /* compute received data ecc into an internal buffer */ if (!data || !recv_ecc) return -EINVAL; - encode_bch(bch, data, len, NULL); + bch_encode(bch, data, len, NULL); } else { /* load provided calculated ecc */ load_ecc8(bch, bch->ecc_buf, calc_ecc); @@ -1048,12 +1105,14 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, break; } errloc[i] = nbits-1-errloc[i]; - errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7)); + if (!bch->swap_bits) + errloc[i] = (errloc[i] & ~7) | + (7-(errloc[i] & 7)); } } return (err >= 0) ? err : -EBADMSG; } -EXPORT_SYMBOL_GPL(decode_bch); +EXPORT_SYMBOL_GPL(bch_decode); /* * generate Galois field lookup tables @@ -1236,27 +1295,29 @@ finish: } /** - * init_bch - initialize a BCH encoder/decoder + * bch_init - initialize a BCH encoder/decoder * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) + * @swap_bits: swap bits within data and syndrome bytes * * Returns: * a newly allocated BCH control structure if successful, NULL otherwise * * This initialization can take some time, as lookup tables are built for fast * encoding/decoding; make sure not to call this function from a time critical - * path. Usually, init_bch() should be called on module/driver init and - * free_bch() should be called to release memory on exit. + * path. Usually, bch_init() should be called on module/driver init and + * bch_free() should be called to release memory on exit. * * You may provide your own primitive polynomial of degree @m in argument - * @prim_poly, or let init_bch() use its default polynomial. + * @prim_poly, or let bch_init() use its default polynomial. * - * Once init_bch() has successfully returned a pointer to a newly allocated + * Once bch_init() has successfully returned a pointer to a newly allocated * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. 
*/ -struct bch_control *init_bch(int m, int t, unsigned int prim_poly) +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits) { int err = 0; unsigned int i, words; @@ -1321,6 +1382,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err); bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err); bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err); + bch->swap_bits = swap_bits; for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err); @@ -1347,16 +1409,16 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) return bch; fail: - free_bch(bch); + bch_free(bch); return NULL; } -EXPORT_SYMBOL_GPL(init_bch); +EXPORT_SYMBOL_GPL(bch_init); /** - * free_bch - free the BCH control structure + * bch_free - free the BCH control structure * @bch: BCH control structure to release */ -void free_bch(struct bch_control *bch) +void bch_free(struct bch_control *bch) { unsigned int i; @@ -1377,7 +1439,7 @@ void free_bch(struct bch_control *bch) kfree(bch); } } -EXPORT_SYMBOL_GPL(free_bch); +EXPORT_SYMBOL_GPL(bch_free); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); diff --git a/lib/bitmap.c b/lib/bitmap.c index 89260aa342d6..21a7640c5eed 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -182,21 +182,22 @@ EXPORT_SYMBOL(__bitmap_shift_left); * * In pictures, example for a big-endian 32-bit architecture: * - * @src: - * 31 63 - * | | - * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101 - * | | | | - * 16 14 0 32 - * - * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is: - * - * 31 63 - * | | - * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010 - * | | | - * 14 (bit 17 0 32 - * from @src) + * The @src bitmap is:: + * + * 31 63 + * | | + * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101 + * | | | | + * 16 14 0 32 + * + * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is:: + * + * 31 63 + * | | + * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010 + * | | | + * 14 (bit 17 0 32 + * from @src) * * Note that @dst and @src might overlap partially or entirely. 
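To illustrate the renamed lib/bch.c interface shown earlier in this diff, here is a minimal editorial sketch (the field-order and correction parameters are arbitrary, data/len stand for a caller-provided buffer, and in practice the received ecc would come back from the storage medium rather than straight from the encode step):

#include <linux/bch.h>
#include <linux/errno.h>

/* GF(2^13), t = 4 correctable bits, default polynomial, no bit swapping. */
static int demo_bch(const uint8_t *data, unsigned int len)
{
	struct bch_control *bch = bch_init(13, 4, 0, false);
	uint8_t ecc[16] = {};		/* >= bch->ecc_bytes (7 here)    */
	unsigned int errloc[4];		/* up to t error locations       */
	int nerr;

	if (!bch)
		return -ENOMEM;

	bch_encode(bch, data, len, ecc);	/* compute parity bytes  */
	/* data + received-ecc form; see the bch_decode() kernel-doc    */
	nerr = bch_decode(bch, data, len, ecc, NULL, NULL, errloc);

	bch_free(bch);
	return nerr;	/* >= 0: number of bit errors located in errloc[] */
}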
* diff --git a/lib/bug.c b/lib/bug.c index 8c98af0bf585..7103440c0ee1 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -47,6 +47,7 @@ #include <linux/bug.h> #include <linux/sched.h> #include <linux/rculist.h> +#include <linux/ftrace.h> extern struct bug_entry __start___bug_table[], __stop___bug_table[]; @@ -153,6 +154,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) if (!bug) return BUG_TRAP_TYPE_NONE; + disable_trace_on_warning(); + file = NULL; line = 0; warning = 0; diff --git a/lib/checksum.c b/lib/checksum.c index de032ad96f4a..7ac65a0000ff 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -146,26 +146,6 @@ __sum16 ip_compute_csum(const void *buff, int len) EXPORT_SYMBOL(ip_compute_csum); /* - * copy from fs while checksumming, otherwise like csum_partial - */ -__wsum -csum_partial_copy_from_user(const void __user *src, void *dst, int len, - __wsum sum, int *csum_err) -{ - int missing; - - missing = __copy_from_user(dst, src, len); - if (missing) { - memset(dst + len - missing, 0, missing); - *csum_err = -EFAULT; - } else - *csum_err = 0; - - return csum_partial(dst, len, sum); -} -EXPORT_SYMBOL(csum_partial_copy_from_user); - -/* * copy from ds while checksumming, otherwise like csum_partial */ __wsum diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c index 65ead6b0c7e0..4ccbec442469 100644 --- a/lib/crypto/chacha.c +++ b/lib/crypto/chacha.c @@ -10,7 +10,6 @@ #include <linux/export.h> #include <linux/bitops.h> #include <linux/string.h> -#include <linux/cryptohash.h> #include <asm/unaligned.h> #include <crypto/chacha.h> diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 66cb04b0cf4e..2e621697c5c3 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -206,7 +206,7 @@ static void sha256_transform(u32 *state, const u8 *input) memzero_explicit(W, 64 * sizeof(u32)); } -int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) +void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) { unsigned int partial, done; const u8 *src; @@ -232,18 +232,16 @@ int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) partial = 0; } memcpy(sctx->buf + partial, src, len - done); - - return 0; } EXPORT_SYMBOL(sha256_update); -int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len) +void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len) { - return sha256_update(sctx, data, len); + sha256_update(sctx, data, len); } EXPORT_SYMBOL(sha224_update); -static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) +static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) { __be32 *dst = (__be32 *)out; __be64 bits; @@ -268,19 +266,17 @@ static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) /* Zeroize sensitive information. 
*/ memset(sctx, 0, sizeof(*sctx)); - - return 0; } -int sha256_final(struct sha256_state *sctx, u8 *out) +void sha256_final(struct sha256_state *sctx, u8 *out) { - return __sha256_final(sctx, out, 8); + __sha256_final(sctx, out, 8); } EXPORT_SYMBOL(sha256_final); -int sha224_final(struct sha256_state *sctx, u8 *out) +void sha224_final(struct sha256_state *sctx, u8 *out) { - return __sha256_final(sctx, out, 7); + __sha256_final(sctx, out, 7); } EXPORT_SYMBOL(sha224_final); diff --git a/lib/debug_locks.c b/lib/debug_locks.c index a75ee30b77cb..06d3135bd184 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent); /* * Generic 'turn off all lock debugging' function: */ -int debug_locks_off(void) +noinstr int debug_locks_off(void) { if (debug_locks && __debug_locks_off()) { if (!debug_locks_silent) { diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 33ffbf308853..a00ee6eedc7c 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -74,7 +74,7 @@ void show_regs_print_info(const char *log_lvl) static void __dump_stack(void) { dump_stack_print_info(KERN_DEFAULT); - show_stack(NULL, NULL); + show_stack(NULL, NULL, KERN_DEFAULT); } /** diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 8f199f403ab5..321437bbf87d 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -1032,8 +1032,13 @@ static int __init dynamic_debug_init(void) int verbose_bytes = 0; if (&__start___verbose == &__stop___verbose) { - pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n"); - return 1; + if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) { + pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n"); + return 1; + } + pr_info("Ignore empty _ddebug table in a CONFIG_DYNAMIC_DEBUG_CORE build\n"); + ddebug_init_success = 1; + return 0; } iter = __start___verbose; modname = iter->modname; diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 8186ca84910b..ce12621b4275 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -106,7 +106,9 @@ bool should_fail(struct fault_attr *attr, ssize_t size) unsigned int fail_nth = READ_ONCE(current->fail_nth); if (fail_nth) { - if (!WRITE_ONCE(current->fail_nth, fail_nth - 1)) + fail_nth--; + WRITE_ONCE(current->fail_nth, fail_nth); + if (!fail_nth) goto fail; return false; diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index 7852bfff50b1..451543937524 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -266,8 +266,7 @@ void __fprop_inc_percpu_max(struct fprop_global *p, if (numerator > (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT) return; - } else - fprop_reflect_period_percpu(p, pl); - percpu_counter_add_batch(&pl->events, 1, PROP_BATCH); - percpu_counter_add(&p->events, 1); + } + + __fprop_inc_percpu(p, pl); } diff --git a/lib/ioremap.c b/lib/ioremap.c index 3f0e18543de8..5ee3526f71b8 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -12,7 +12,6 @@ #include <linux/io.h> #include <linux/export.h> #include <asm/cacheflush.h> -#include <asm/pgtable.h> #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP static int __read_mostly ioremap_p4d_capable; @@ -61,13 +60,14 @@ static inline int ioremap_pmd_enabled(void) { return 0; } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { pte_t *pte; u64 pfn; pfn = phys_addr >> PAGE_SHIFT; - pte = pte_alloc_kernel(pmd, addr); + pte = 
pte_alloc_kernel_track(pmd, addr, mask); if (!pte) return -ENOMEM; do { @@ -75,6 +75,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); + *mask |= PGTBL_PTE_MODIFIED; return 0; } @@ -101,21 +102,24 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr, } static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; - pmd = pmd_alloc(&init_mm, pud, addr); + pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); - if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) + if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) { + *mask |= PGTBL_PMD_MODIFIED; continue; + } - if (ioremap_pte_range(pmd, addr, next, phys_addr, prot)) + if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask)) return -ENOMEM; } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); return 0; @@ -144,21 +148,24 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, } static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; - pud = pud_alloc(&init_mm, p4d, addr); + pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); - if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) + if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) { + *mask |= PGTBL_PUD_MODIFIED; continue; + } - if (ioremap_pmd_range(pud, addr, next, phys_addr, prot)) + if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask)) return -ENOMEM; } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; @@ -187,21 +194,24 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, } static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, - unsigned long end, phys_addr_t phys_addr, pgprot_t prot) + unsigned long end, phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; - p4d = p4d_alloc(&init_mm, pgd, addr); + p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); - if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) + if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) { + *mask |= PGTBL_P4D_MODIFIED; continue; + } - if (ioremap_pud_range(p4d, addr, next, phys_addr, prot)) + if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask)) return -ENOMEM; } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); return 0; @@ -214,6 +224,7 @@ int ioremap_page_range(unsigned long addr, unsigned long start; unsigned long next; int err; + pgtbl_mod_mask mask = 0; might_sleep(); BUG_ON(addr >= end); @@ -222,13 +233,17 @@ int ioremap_page_range(unsigned long addr, pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot); + err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot, + &mask); if (err) break; } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); flush_cache_vmap(start, end); + if (mask & ARCH_PAGE_TABLE_SYNC_MASK) + 
arch_sync_kernel_mappings(start, end); + return err; } diff --git a/lib/kobject.c b/lib/kobject.c index 83198cb37d8d..1e4b7382a88e 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -6,7 +6,7 @@ * Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2006-2007 Novell Inc. * - * Please see the file Documentation/kobject.txt for critical information + * Please see the file Documentation/core-api/kobject.rst for critical information * about using the kobject interface. */ @@ -620,6 +620,13 @@ void kobject_del(struct kobject *kobj) if (ktype) sysfs_remove_groups(kobj, ktype->default_groups); + /* send "remove" if the caller did not do it but sent "add" */ + if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) { + pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n", + kobject_name(kobj), kobj); + kobject_uevent(kobj, KOBJ_REMOVE); + } + sysfs_remove_dir(kobj); sysfs_put(sd); @@ -670,16 +677,9 @@ static void kobject_cleanup(struct kobject *kobj) kobject_name(kobj), kobj, __func__, kobj->parent); if (t && !t->release) - pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n", + pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", kobject_name(kobj), kobj); - /* send "remove" if the caller did not do it but sent "add" */ - if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) { - pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n", - kobject_name(kobj), kobj); - kobject_uevent(kobj, KOBJ_REMOVE); - } - /* remove from sysfs if the caller did not do it */ if (kobj->state_in_sysfs) { pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n", diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig index 95d12e3d6d95..00909e6a2443 100644 --- a/lib/kunit/Kconfig +++ b/lib/kunit/Kconfig @@ -15,7 +15,8 @@ menuconfig KUNIT if KUNIT config KUNIT_DEBUGFS - bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" + bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS help Enable debugfs representation for kunit. Currently this consists of /sys/kernel/debug/kunit/<test_suite>/results files for each @@ -23,7 +24,8 @@ config KUNIT_DEBUGFS run that occurred. config KUNIT_TEST - tristate "KUnit test for KUnit" + tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS help Enables the unit tests for the KUnit test framework. These tests test the KUnit test framework itself; the tests are both written using @@ -32,7 +34,8 @@ config KUNIT_TEST expected. config KUNIT_EXAMPLE_TEST - tristate "Example test for KUnit" + tristate "Example test for KUnit" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS help Enables an example unit test that illustrates some of the basic features of KUnit. This test only exists to help new users understand @@ -41,4 +44,18 @@ config KUNIT_EXAMPLE_TEST is intended for curious hackers who would like to understand how to use KUnit for kernel development. +config KUNIT_ALL_TESTS + tristate "All KUnit tests with satisfied dependencies" + help + Enables all KUnit tests, if they can be enabled. + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. 
+ + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + endif # KUNIT diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 7a6430a7fca0..ccb2ffad8dcf 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -93,7 +93,7 @@ static void kunit_print_ok_not_ok(void *test_or_suite, * representation. */ if (suite) - pr_info("%s %zd - %s", + pr_info("%s %zd - %s\n", kunit_status_to_string(is_ok), test_number, description); else diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c new file mode 100644 index 000000000000..9495ef3572b7 --- /dev/null +++ b/lib/linear_ranges.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * helpers to map values in a linear range to range index + * + * Original idea borrowed from regulator framework + * + * It might be useful if we could support also inversely proportional ranges? + * Copyright 2020 ROHM Semiconductors + */ + +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/linear_range.h> +#include <linux/module.h> + +/** + * linear_range_values_in_range - return the amount of values in a range + * @r: pointer to linear range where values are counted + * + * Compute the amount of values in range pointed by @r. Note, values can + * be all equal - range with selectors 0,...,2 with step 0 still contains + * 3 values even though they are all equal. + * + * Return: the amount of values in range pointed by @r + */ +unsigned int linear_range_values_in_range(const struct linear_range *r) +{ + if (!r) + return 0; + return r->max_sel - r->min_sel + 1; +} +EXPORT_SYMBOL_GPL(linear_range_values_in_range); + +/** + * linear_range_values_in_range_array - return the amount of values in ranges + * @r: pointer to array of linear ranges where values are counted + * @ranges: amount of ranges we include in computation. + * + * Compute the amount of values in ranges pointed by @r. Note, values can + * be all equal - range with selectors 0,...,2 with step 0 still contains + * 3 values even though they are all equal. + * + * Return: the amount of values in first @ranges ranges pointed by @r + */ +unsigned int linear_range_values_in_range_array(const struct linear_range *r, + int ranges) +{ + int i, values_in_range = 0; + + for (i = 0; i < ranges; i++) { + int values; + + values = linear_range_values_in_range(&r[i]); + if (!values) + return values; + + values_in_range += values; + } + return values_in_range; +} +EXPORT_SYMBOL_GPL(linear_range_values_in_range_array); + +/** + * linear_range_get_max_value - return the largest value in a range + * @r: pointer to linear range where value is looked from + * + * Return: the largest value in the given range + */ +unsigned int linear_range_get_max_value(const struct linear_range *r) +{ + return r->min + (r->max_sel - r->min_sel) * r->step; +} +EXPORT_SYMBOL_GPL(linear_range_get_max_value); + +/** + * linear_range_get_value - fetch a value from given range + * @r: pointer to linear range where value is looked from + * @selector: selector for which the value is searched + * @val: address where found value is updated + * + * Search given ranges for value which matches given selector. + * + * Return: 0 on success, -EINVAL given selector is not found from any of the + * ranges. 
+ */ +int linear_range_get_value(const struct linear_range *r, unsigned int selector, + unsigned int *val) +{ + if (r->min_sel > selector || r->max_sel < selector) + return -EINVAL; + + *val = r->min + (selector - r->min_sel) * r->step; + + return 0; +} +EXPORT_SYMBOL_GPL(linear_range_get_value); + +/** + * linear_range_get_value_array - fetch a value from array of ranges + * @r: pointer to array of linear ranges where value is looked from + * @ranges: amount of ranges in an array + * @selector: selector for which the value is searched + * @val: address where found value is updated + * + * Search through an array of ranges for value which matches given selector. + * + * Return: 0 on success, -EINVAL given selector is not found from any of the + * ranges. + */ +int linear_range_get_value_array(const struct linear_range *r, int ranges, + unsigned int selector, unsigned int *val) +{ + int i; + + for (i = 0; i < ranges; i++) + if (r[i].min_sel <= selector && r[i].max_sel >= selector) + return linear_range_get_value(&r[i], selector, val); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(linear_range_get_value_array); + +/** + * linear_range_get_selector_low - return linear range selector for value + * @r: pointer to linear range where selector is looked from + * @val: value for which the selector is searched + * @selector: address where found selector value is updated + * @found: flag to indicate that given value was in the range + * + * Return selector which which range value is closest match for given + * input value. Value is matching if it is equal or smaller than given + * value. If given value is in the range, then @found is set true. + * + * Return: 0 on success, -EINVAL if range is invalid or does not contain + * value smaller or equal to given value + */ +int linear_range_get_selector_low(const struct linear_range *r, + unsigned int val, unsigned int *selector, + bool *found) +{ + *found = false; + + if (r->min > val) + return -EINVAL; + + if (linear_range_get_max_value(r) < val) { + *selector = r->max_sel; + return 0; + } + + *found = true; + + if (r->step == 0) + *selector = r->min_sel; + else + *selector = (val - r->min) / r->step + r->min_sel; + + return 0; +} +EXPORT_SYMBOL_GPL(linear_range_get_selector_low); + +/** + * linear_range_get_selector_low_array - return linear range selector for value + * @r: pointer to array of linear ranges where selector is looked from + * @ranges: amount of ranges to scan from array + * @val: value for which the selector is searched + * @selector: address where found selector value is updated + * @found: flag to indicate that given value was in the range + * + * Scan array of ranges for selector which which range value matches given + * input value. Value is matching if it is equal or smaller than given + * value. If given value is found to be in a range scanning is stopped and + * @found is set true. If a range with values smaller than given value is found + * but the range max is being smaller than given value, then the ranges + * biggest selector is updated to @selector but scanning ranges is continued + * and @found is set to false. 
+ * + * Return: 0 on success, -EINVAL if range array is invalid or does not contain + * range with a value smaller or equal to given value + */ +int linear_range_get_selector_low_array(const struct linear_range *r, + int ranges, unsigned int val, + unsigned int *selector, bool *found) +{ + int i; + int ret = -EINVAL; + + for (i = 0; i < ranges; i++) { + int tmpret; + + tmpret = linear_range_get_selector_low(&r[i], val, selector, + found); + if (!tmpret) + ret = 0; + + if (*found) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array); + +/** + * linear_range_get_selector_high - return linear range selector for value + * @r: pointer to linear range where selector is looked from + * @val: value for which the selector is searched + * @selector: address where found selector value is updated + * @found: flag to indicate that given value was in the range + * + * Return selector which which range value is closest match for given + * input value. Value is matching if it is equal or higher than given + * value. If given value is in the range, then @found is set true. + * + * Return: 0 on success, -EINVAL if range is invalid or does not contain + * value greater or equal to given value + */ +int linear_range_get_selector_high(const struct linear_range *r, + unsigned int val, unsigned int *selector, + bool *found) +{ + *found = false; + + if (linear_range_get_max_value(r) < val) + return -EINVAL; + + if (r->min > val) { + *selector = r->min_sel; + return 0; + } + + *found = true; + + if (r->step == 0) + *selector = r->max_sel; + else + *selector = DIV_ROUND_UP(val - r->min, r->step) + r->min_sel; + + return 0; +} +EXPORT_SYMBOL_GPL(linear_range_get_selector_high); + +MODULE_DESCRIPTION("linear-ranges helper"); +MODULE_LICENSE("GPL"); diff --git a/lib/logic_pio.c b/lib/logic_pio.c index f511a99bb389..f32fe481b492 100644 --- a/lib/logic_pio.c +++ b/lib/logic_pio.c @@ -229,13 +229,13 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr) } #if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE) -#define BUILD_LOGIC_IO(bw, type) \ -type logic_in##bw(unsigned long addr) \ +#define BUILD_LOGIC_IO(bwl, type) \ +type logic_in##bwl(unsigned long addr) \ { \ type ret = (type)~0; \ \ if (addr < MMIO_UPPER_LIMIT) { \ - ret = read##bw(PCI_IOBASE + addr); \ + ret = _in##bwl(addr); \ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \ struct logic_pio_hwaddr *entry = find_io_range(addr); \ \ @@ -248,10 +248,10 @@ type logic_in##bw(unsigned long addr) \ return ret; \ } \ \ -void logic_out##bw(type value, unsigned long addr) \ +void logic_out##bwl(type value, unsigned long addr) \ { \ if (addr < MMIO_UPPER_LIMIT) { \ - write##bw(value, PCI_IOBASE + addr); \ + _out##bwl(value, addr); \ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \ struct logic_pio_hwaddr *entry = find_io_range(addr); \ \ @@ -263,11 +263,11 @@ void logic_out##bw(type value, unsigned long addr) \ } \ } \ \ -void logic_ins##bw(unsigned long addr, void *buffer, \ - unsigned int count) \ +void logic_ins##bwl(unsigned long addr, void *buffer, \ + unsigned int count) \ { \ if (addr < MMIO_UPPER_LIMIT) { \ - reads##bw(PCI_IOBASE + addr, buffer, count); \ + reads##bwl(PCI_IOBASE + addr, buffer, count); \ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \ struct logic_pio_hwaddr *entry = find_io_range(addr); \ \ @@ -280,11 +280,11 @@ void logic_ins##bw(unsigned long addr, void *buffer, \ \ } \ \ -void logic_outs##bw(unsigned long addr, const void *buffer, \ - unsigned int 
count) \ +void logic_outs##bwl(unsigned long addr, const void *buffer, \ + unsigned int count) \ { \ if (addr < MMIO_UPPER_LIMIT) { \ - writes##bw(PCI_IOBASE + addr, buffer, count); \ + writes##bwl(PCI_IOBASE + addr, buffer, count); \ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \ struct logic_pio_hwaddr *entry = find_io_range(addr); \ \ diff --git a/lib/math/Kconfig b/lib/math/Kconfig index 15bd50d92308..f19bc9734fa7 100644 --- a/lib/math/Kconfig +++ b/lib/math/Kconfig @@ -6,7 +6,12 @@ config CORDIC calculations are in fixed point. Module will be called cordic. config PRIME_NUMBERS - tristate + tristate "Simple prime number generator for testing" + help + This option provides a simple prime number generator for test + modules. + + If unsure, say N. config RATIONAL bool diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c index 052f5b727be7..d42cebf7407f 100644 --- a/lib/math/prime_numbers.c +++ b/lib/math/prime_numbers.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -#define pr_fmt(fmt) "prime numbers: " fmt "\n" +#define pr_fmt(fmt) "prime numbers: " fmt #include <linux/module.h> #include <linux/mutex.h> @@ -253,7 +253,7 @@ static void dump_primes(void) if (buf) bitmap_print_to_pagebuf(true, buf, p->primes, p->sz); - pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s", + pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n", p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf); rcu_read_unlock(); @@ -273,7 +273,7 @@ static int selftest(unsigned long max) bool fast = is_prime_number(x); if (slow != fast) { - pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!", + pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n", x, slow ? "yes" : "no", fast ? "yes" : "no"); goto err; } @@ -282,14 +282,14 @@ static int selftest(unsigned long max) continue; if (next_prime_number(last) != x) { - pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu", + pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n", last, x, next_prime_number(last)); goto err; } last = x; } - pr_info("selftest(%lu) passed, last prime was %lu", x, last); + pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last); return 0; err: diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 2dceaca27489..afbd99987cf8 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -653,7 +653,7 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) /* * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C * code below, so we special case MIPS64r6 until the compiler can do better. 
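Stepping back to the lib/linear_ranges.c helpers added earlier in this diff, a short usage sketch (editorial; the values are invented and would normally describe something like a regulator voltage table):

#include <linux/linear_range.h>
#include <linux/types.h>

/* Selectors 0..3 map linearly to 1000, 1100, 1200, 1300. */
static const struct linear_range demo_range = {
	.min = 1000,
	.min_sel = 0,
	.max_sel = 3,
	.step = 100,
};

static int demo_linear_range(void)
{
	unsigned int val, sel;
	bool found;
	int ret;

	ret = linear_range_get_value(&demo_range, 2, &val);
	/* ret == 0, val == 1200 */

	ret = linear_range_get_selector_high(&demo_range, 1150, &sel, &found);
	/* ret == 0, sel == 2 (rounded up), found == true */

	return ret;
}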
@@ -722,22 +722,22 @@ do { \ do { \ if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else \ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "%r" ((USItype)(al)), \ @@ -747,36 +747,36 @@ do { \ do { \ if (__builtin_constant_p(ah) && (ah) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else \ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ @@ -787,7 +787,7 @@ do { \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mulhwu %0,%1,%2" \ - : "=r" ((USItype) ph) \ + : "=r" (ph) \ : "%r" (__m0), \ "r" (__m1)); \ (pl) = __m0 * __m1; \ diff --git a/lib/nlattr.c b/lib/nlattr.c index cace9b307781..bc5b5cf608c4 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -44,8 +44,22 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_S64] = sizeof(s64), }; +/* + * Nested policies might refer back to the original + * policy in some cases, and userspace could try to + * abuse that and recurse by nesting in the right + * ways. Limit recursion to avoid this problem. 
+ */ +#define MAX_POLICY_RECURSION_DEPTH 10 + +static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, + const struct nla_policy *policy, + unsigned int validate, + struct netlink_ext_ack *extack, + struct nlattr **tb, unsigned int depth); + static int validate_nla_bitfield32(const struct nlattr *nla, - const u32 *valid_flags_mask) + const u32 valid_flags_mask) { const struct nla_bitfield32 *bf = nla_data(nla); @@ -53,11 +67,11 @@ static int validate_nla_bitfield32(const struct nlattr *nla, return -EINVAL; /*disallow invalid bit selector */ - if (bf->selector & ~*valid_flags_mask) + if (bf->selector & ~valid_flags_mask) return -EINVAL; /*disallow invalid bit values */ - if (bf->value & ~*valid_flags_mask) + if (bf->value & ~valid_flags_mask) return -EINVAL; /*disallow valid bit values that are not selected*/ @@ -70,7 +84,7 @@ static int validate_nla_bitfield32(const struct nlattr *nla, static int nla_validate_array(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack, - unsigned int validate) + unsigned int validate, unsigned int depth) { const struct nlattr *entry; int rem; @@ -87,8 +101,9 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype, return -ERANGE; } - ret = __nla_validate(nla_data(entry), nla_len(entry), - maxtype, policy, validate, extack); + ret = __nla_validate_parse(nla_data(entry), nla_len(entry), + maxtype, policy, validate, extack, + NULL, depth + 1); if (ret < 0) return ret; } @@ -96,17 +111,58 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype, return 0; } -static int nla_validate_int_range(const struct nla_policy *pt, - const struct nlattr *nla, - struct netlink_ext_ack *extack) +void nla_get_range_unsigned(const struct nla_policy *pt, + struct netlink_range_validation *range) { - bool validate_min, validate_max; - s64 value; + WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR && + (pt->min < 0 || pt->max < 0)); - validate_min = pt->validation_type == NLA_VALIDATE_RANGE || - pt->validation_type == NLA_VALIDATE_MIN; - validate_max = pt->validation_type == NLA_VALIDATE_RANGE || - pt->validation_type == NLA_VALIDATE_MAX; + range->min = 0; + + switch (pt->type) { + case NLA_U8: + range->max = U8_MAX; + break; + case NLA_U16: + range->max = U16_MAX; + break; + case NLA_U32: + range->max = U32_MAX; + break; + case NLA_U64: + case NLA_MSECS: + range->max = U64_MAX; + break; + default: + WARN_ON_ONCE(1); + return; + } + + switch (pt->validation_type) { + case NLA_VALIDATE_RANGE: + range->min = pt->min; + range->max = pt->max; + break; + case NLA_VALIDATE_RANGE_PTR: + *range = *pt->range; + break; + case NLA_VALIDATE_MIN: + range->min = pt->min; + break; + case NLA_VALIDATE_MAX: + range->max = pt->max; + break; + default: + break; + } +} + +static int nla_validate_int_range_unsigned(const struct nla_policy *pt, + const struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + struct netlink_range_validation range; + u64 value; switch (pt->type) { case NLA_U8: @@ -118,6 +174,77 @@ static int nla_validate_int_range(const struct nla_policy *pt, case NLA_U32: value = nla_get_u32(nla); break; + case NLA_U64: + case NLA_MSECS: + value = nla_get_u64(nla); + break; + default: + return -EINVAL; + } + + nla_get_range_unsigned(pt, &range); + + if (value < range.min || value > range.max) { + NL_SET_ERR_MSG_ATTR(extack, nla, + "integer out of range"); + return -ERANGE; + } + + return 0; +} + +void nla_get_range_signed(const struct 
nla_policy *pt, + struct netlink_range_validation_signed *range) +{ + switch (pt->type) { + case NLA_S8: + range->min = S8_MIN; + range->max = S8_MAX; + break; + case NLA_S16: + range->min = S16_MIN; + range->max = S16_MAX; + break; + case NLA_S32: + range->min = S32_MIN; + range->max = S32_MAX; + break; + case NLA_S64: + range->min = S64_MIN; + range->max = S64_MAX; + break; + default: + WARN_ON_ONCE(1); + return; + } + + switch (pt->validation_type) { + case NLA_VALIDATE_RANGE: + range->min = pt->min; + range->max = pt->max; + break; + case NLA_VALIDATE_RANGE_PTR: + *range = *pt->range_signed; + break; + case NLA_VALIDATE_MIN: + range->min = pt->min; + break; + case NLA_VALIDATE_MAX: + range->max = pt->max; + break; + default: + break; + } +} + +static int nla_validate_int_range_signed(const struct nla_policy *pt, + const struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + struct netlink_range_validation_signed range; + s64 value; + + switch (pt->type) { case NLA_S8: value = nla_get_s8(nla); break; @@ -130,22 +257,13 @@ static int nla_validate_int_range(const struct nla_policy *pt, case NLA_S64: value = nla_get_s64(nla); break; - case NLA_U64: - /* treat this one specially, since it may not fit into s64 */ - if ((validate_min && nla_get_u64(nla) < pt->min) || - (validate_max && nla_get_u64(nla) > pt->max)) { - NL_SET_ERR_MSG_ATTR(extack, nla, - "integer out of range"); - return -ERANGE; - } - return 0; default: - WARN_ON(1); return -EINVAL; } - if ((validate_min && value < pt->min) || - (validate_max && value > pt->max)) { + nla_get_range_signed(pt, &range); + + if (value < range.min || value > range.max) { NL_SET_ERR_MSG_ATTR(extack, nla, "integer out of range"); return -ERANGE; @@ -154,9 +272,31 @@ static int nla_validate_int_range(const struct nla_policy *pt, return 0; } +static int nla_validate_int_range(const struct nla_policy *pt, + const struct nlattr *nla, + struct netlink_ext_ack *extack) +{ + switch (pt->type) { + case NLA_U8: + case NLA_U16: + case NLA_U32: + case NLA_U64: + case NLA_MSECS: + return nla_validate_int_range_unsigned(pt, nla, extack); + case NLA_S8: + case NLA_S16: + case NLA_S32: + case NLA_S64: + return nla_validate_int_range_signed(pt, nla, extack); + default: + WARN_ON(1); + return -EINVAL; + } +} + static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy, unsigned int validate, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, unsigned int depth) { u16 strict_start_type = policy[0].strict_start_type; const struct nla_policy *pt; @@ -174,7 +314,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype, BUG_ON(pt->type > NLA_TYPE_MAX); if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) || - (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) { + (pt->type == NLA_EXACT_LEN && + pt->validation_type == NLA_VALIDATE_WARN_TOO_LONG && + attrlen != pt->len)) { pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", current->comm, type); if (validate & NL_VALIDATE_STRICT_ATTRS) { @@ -200,15 +342,10 @@ static int validate_nla(const struct nlattr *nla, int maxtype, } switch (pt->type) { - case NLA_EXACT_LEN: - if (attrlen != pt->len) - goto out_err; - break; - case NLA_REJECT: - if (extack && pt->validation_data) { + if (extack && pt->reject_message) { NL_SET_BAD_ATTR(extack, nla); - extack->_msg = pt->validation_data; + extack->_msg = pt->reject_message; return -EINVAL; } err = -EINVAL; @@ -223,7 +360,7 @@ static int validate_nla(const struct nlattr *nla, 
int maxtype, if (attrlen != sizeof(struct nla_bitfield32)) goto out_err; - err = validate_nla_bitfield32(nla, pt->validation_data); + err = validate_nla_bitfield32(nla, pt->bitfield32_valid); if (err) goto out_err; break; @@ -268,10 +405,11 @@ static int validate_nla(const struct nlattr *nla, int maxtype, break; if (attrlen < NLA_HDRLEN) goto out_err; - if (pt->validation_data) { - err = __nla_validate(nla_data(nla), nla_len(nla), pt->len, - pt->validation_data, validate, - extack); + if (pt->nested_policy) { + err = __nla_validate_parse(nla_data(nla), nla_len(nla), + pt->len, pt->nested_policy, + validate, extack, NULL, + depth + 1); if (err < 0) { /* * return directly to preserve the inner @@ -289,12 +427,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype, break; if (attrlen < NLA_HDRLEN) goto out_err; - if (pt->validation_data) { + if (pt->nested_policy) { int err; err = nla_validate_array(nla_data(nla), nla_len(nla), - pt->len, pt->validation_data, - extack, validate); + pt->len, pt->nested_policy, + extack, validate, depth); if (err < 0) { /* * return directly to preserve the inner @@ -317,6 +455,13 @@ static int validate_nla(const struct nlattr *nla, int maxtype, goto out_err; break; + case NLA_EXACT_LEN: + if (pt->validation_type != NLA_VALIDATE_WARN_TOO_LONG) { + if (attrlen != pt->len) + goto out_err; + break; + } + /* fall through */ default: if (pt->len) minlen = pt->len; @@ -332,6 +477,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype, case NLA_VALIDATE_NONE: /* nothing to do */ break; + case NLA_VALIDATE_RANGE_PTR: case NLA_VALIDATE_RANGE: case NLA_VALIDATE_MIN: case NLA_VALIDATE_MAX: @@ -358,11 +504,17 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, - struct nlattr **tb) + struct nlattr **tb, unsigned int depth) { const struct nlattr *nla; int rem; + if (depth >= MAX_POLICY_RECURSION_DEPTH) { + NL_SET_ERR_MSG(extack, + "allowed policy recursion depth exceeded"); + return -EINVAL; + } + if (tb) memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); @@ -379,7 +531,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, } if (policy) { int err = validate_nla(nla, maxtype, policy, - validate, extack); + validate, extack, depth); if (err < 0) return err; @@ -421,7 +573,7 @@ int __nla_validate(const struct nlattr *head, int len, int maxtype, struct netlink_ext_ack *extack) { return __nla_validate_parse(head, len, maxtype, policy, validate, - extack, NULL); + extack, NULL, 0); } EXPORT_SYMBOL(__nla_validate); @@ -476,7 +628,7 @@ int __nla_parse(struct nlattr **tb, int maxtype, struct netlink_ext_ack *extack) { return __nla_validate_parse(head, len, maxtype, policy, validate, - extack, tb); + extack, tb, 0); } EXPORT_SYMBOL(__nla_parse); diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 8d092609928e..0ba686b8fe57 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -#define pr_fmt(fmt) "%s: " fmt "\n", __func__ +#define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/sched.h> @@ -141,8 +141,8 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu) for_each_possible_cpu(cpu) count += *per_cpu_ptr(percpu_count, cpu); - pr_debug("global %ld percpu %ld", - atomic_long_read(&ref->count), (long)count); + pr_debug("global %lu percpu %lu\n", + atomic_long_read(&ref->count), count); /* * 
It's crucial that we sum the percpu counters _before_ adding the sum diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 2ee6ae3b0ade..34e406fe561f 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -20,6 +20,7 @@ #include <linux/kernel.h> #include <linux/kmemleak.h> #include <linux/percpu.h> +#include <linux/local_lock.h> #include <linux/preempt.h> /* in_interrupt() */ #include <linux/radix-tree.h> #include <linux/rcupdate.h> @@ -27,7 +28,6 @@ #include <linux/string.h> #include <linux/xarray.h> - /* * Radix tree node cache. */ @@ -58,12 +58,10 @@ struct kmem_cache *radix_tree_node_cachep; /* * Per-cpu pool of preloaded nodes */ -struct radix_tree_preload { - unsigned nr; - /* nodes->parent points to next preallocated node */ - struct radix_tree_node *nodes; +DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { + .lock = INIT_LOCAL_LOCK(lock), }; -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; +EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads); static inline struct radix_tree_node *entry_to_node(void *ptr) { @@ -332,14 +330,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) */ gfp_mask &= ~__GFP_ACCOUNT; - preempt_disable(); + local_lock(&radix_tree_preloads.lock); rtp = this_cpu_ptr(&radix_tree_preloads); while (rtp->nr < nr) { - preempt_enable(); + local_unlock(&radix_tree_preloads.lock); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; - preempt_disable(); + local_lock(&radix_tree_preloads.lock); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { node->parent = rtp->nodes; @@ -381,7 +379,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) if (gfpflags_allow_blocking(gfp_mask)) return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); /* Preloading doesn't help anything with this gfp mask, skip it */ - preempt_disable(); + local_lock(&radix_tree_preloads.lock); return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); @@ -1470,7 +1468,7 @@ EXPORT_SYMBOL(radix_tree_tagged); void idr_preload(gfp_t gfp_mask) { if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) - preempt_disable(); + local_lock(&radix_tree_preloads.lock); } EXPORT_SYMBOL(idr_preload); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index bdb7e4cadf05..9f6890aedd1a 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -63,13 +63,22 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #define ASSERT_RHT_MUTEX(HT) #endif +static inline union nested_table *nested_table_top( + const struct bucket_table *tbl) +{ + /* The top-level bucket entry does not need RCU protection + * because it's set at the same time as tbl->nest. 
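The radix-tree change above replaces open-coded preempt_disable()/preempt_enable() around the per-CPU preload pool with a local_lock_t embedded in the per-CPU structure: on non-PREEMPT_RT kernels this still maps to disabling preemption, but it documents what is being protected and gives PREEMPT_RT a real per-CPU lock to take instead. A minimal sketch of the pattern, using a made-up per-CPU structure (and needing <linux/local_lock.h>, which the patch adds to the radix-tree includes), looks like this:

struct demo_pcpu {
	local_lock_t lock;
	unsigned int count;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void demo_bump(void)
{
	/* serializes access to this CPU's instance only */
	local_lock(&demo_pcpu.lock);
	this_cpu_inc(demo_pcpu.count);
	local_unlock(&demo_pcpu.lock);
}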
+ */ + return (void *)rcu_dereference_protected(tbl->buckets[0], 1); +} + static void nested_table_free(union nested_table *ntbl, unsigned int size) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); const unsigned int len = 1 << shift; unsigned int i; - ntbl = rcu_dereference_raw(ntbl->table); + ntbl = rcu_dereference_protected(ntbl->table, 1); if (!ntbl) return; @@ -89,7 +98,7 @@ static void nested_bucket_table_free(const struct bucket_table *tbl) union nested_table *ntbl; unsigned int i; - ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + ntbl = nested_table_top(tbl); for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); @@ -1173,7 +1182,7 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, unsigned int subhash = hash; union nested_table *ntbl; - ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + ntbl = nested_table_top(tbl); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); subhash >>= tbl->nest; @@ -1213,7 +1222,7 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, unsigned int size = tbl->size >> tbl->nest; union nested_table *ntbl; - ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + ntbl = nested_table_top(tbl); hash >>= tbl->nest; ntbl = nested_table_alloc(ht, &ntbl[index].table, size <= (1 << shift)); diff --git a/lib/sha1.c b/lib/sha1.c index 1d96d2c02b82..49257a915bb6 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -9,7 +9,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/bitops.h> -#include <linux/cryptohash.h> +#include <crypto/sha.h> #include <asm/unaligned.h> /* @@ -64,22 +64,24 @@ #define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) /** - * sha_transform - single block SHA1 transform + * sha1_transform - single block SHA1 transform (deprecated) * * @digest: 160 bit digest to update * @data: 512 bits of data to hash * @array: 16 words of workspace (see note) * - * This function generates a SHA1 digest for a single 512-bit block. - * Be warned, it does not handle padding and message digest, do not - * confuse it with the full FIPS 180-1 digest algorithm for variable - * length messages. + * This function executes SHA-1's internal compression function. It updates the + * 160-bit internal state (@digest) with a single 512-bit data block (@data). + * + * Don't use this function. SHA-1 is no longer considered secure. And even if + * you do have to use SHA-1, this isn't the correct way to hash something with + * SHA-1 as this doesn't handle padding and finalization. * * Note: If the hash is security sensitive, the caller should be sure * to clear the workspace. This is left to the caller to avoid * unnecessary clears between chained hashing operations. 
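With the rename below, callers include <crypto/sha.h> and use the sha1_ prefix directly. A minimal sketch of driving the renamed helpers over a single pre-padded 64-byte block follows; it assumes SHA1_DIGEST_WORDS and SHA1_WORKSPACE_WORDS are provided by that header and that 'block' points at the 64 bytes to be compressed:

	__u32 digest[SHA1_DIGEST_WORDS];
	__u32 ws[SHA1_WORKSPACE_WORDS];

	sha1_init(digest);
	sha1_transform(digest, block, ws);
	/* per the note above: clear the workspace if the data is sensitive */
	memzero_explicit(ws, sizeof(ws));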
*/ -void sha_transform(__u32 *digest, const char *data, __u32 *array) +void sha1_transform(__u32 *digest, const char *data, __u32 *array) { __u32 A, B, C, D, E; @@ -185,13 +187,13 @@ void sha_transform(__u32 *digest, const char *data, __u32 *array) digest[3] += D; digest[4] += E; } -EXPORT_SYMBOL(sha_transform); +EXPORT_SYMBOL(sha1_transform); /** - * sha_init - initialize the vectors for a SHA1 digest + * sha1_init - initialize the vectors for a SHA1 digest * @buf: vector to initialize */ -void sha_init(__u32 *buf) +void sha1_init(__u32 *buf) { buf[0] = 0x67452301; buf[1] = 0xefcdab89; @@ -199,4 +201,4 @@ void sha_init(__u32 *buf) buf[3] = 0x10325476; buf[4] = 0xc3d2e1f0; } -EXPORT_SYMBOL(sha_init); +EXPORT_SYMBOL(sha1_init); diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 706020b06617..34696a348864 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -98,6 +98,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count) { unsigned long max_addr, src_addr; + might_fault(); if (unlikely(count <= 0)) return 0; @@ -116,9 +117,9 @@ long strncpy_from_user(char *dst, const char __user *src, long count) kasan_check_write(dst, count); check_object_size(dst, count, false); - if (user_access_begin(src, max)) { + if (user_read_access_begin(src, max)) { retval = do_strncpy_from_user(dst, src, count, max); - user_access_end(); + user_read_access_end(); return retval; } } diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 41670d4a5816..1616710b8a82 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -109,9 +109,9 @@ long strnlen_user(const char __user *str, long count) if (max > count) max = count; - if (user_access_begin(str, max)) { + if (user_read_access_begin(str, max)) { retval = do_strnlen_user(str, count, max); - user_access_end(); + user_read_access_end(); return retval; } } diff --git a/lib/test_bitops.c b/lib/test_bitops.c new file mode 100644 index 000000000000..fd50b3ae4a14 --- /dev/null +++ b/lib/test_bitops.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Intel Corporation + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/printk.h> + +/* a tiny module only meant to test set/clear_bit */ + +/* use an enum because thats the most common BITMAP usage */ +enum bitops_fun { + BITOPS_4 = 4, + BITOPS_7 = 7, + BITOPS_11 = 11, + BITOPS_31 = 31, + BITOPS_88 = 88, + BITOPS_LAST = 255, + BITOPS_LENGTH = 256 +}; + +static DECLARE_BITMAP(g_bitmap, BITOPS_LENGTH); + +static int __init test_bitops_startup(void) +{ + pr_warn("Loaded test module\n"); + set_bit(BITOPS_4, g_bitmap); + set_bit(BITOPS_7, g_bitmap); + set_bit(BITOPS_11, g_bitmap); + set_bit(BITOPS_31, g_bitmap); + set_bit(BITOPS_88, g_bitmap); + return 0; +} + +static void __exit test_bitops_unstartup(void) +{ + int bit_set; + + clear_bit(BITOPS_4, g_bitmap); + clear_bit(BITOPS_7, g_bitmap); + clear_bit(BITOPS_11, g_bitmap); + clear_bit(BITOPS_31, g_bitmap); + clear_bit(BITOPS_88, g_bitmap); + + bit_set = find_first_bit(g_bitmap, BITOPS_LAST); + if (bit_set != BITOPS_LAST) + pr_err("ERROR: FOUND SET BIT %d\n", bit_set); + + pr_warn("Unloaded test module\n"); +} + +module_init(test_bitops_startup); +module_exit(test_bitops_unstartup); + +MODULE_AUTHOR("Jesse Brandeburg <jesse.brandeburg@intel.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Bit testing module"); diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 0c7fbcf07ac5..9fee2b93a8d1 100644 --- 
a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -310,27 +310,13 @@ static int test_dev_config_update_bool(const char *buf, size_t size, return ret; } -static ssize_t -test_dev_config_show_bool(char *buf, - bool config) +static ssize_t test_dev_config_show_bool(char *buf, bool val) { - bool val; - - mutex_lock(&test_fw_mutex); - val = config; - mutex_unlock(&test_fw_mutex); - return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static ssize_t test_dev_config_show_int(char *buf, int cfg) +static ssize_t test_dev_config_show_int(char *buf, int val) { - int val; - - mutex_lock(&test_fw_mutex); - val = cfg; - mutex_unlock(&test_fw_mutex); - return snprintf(buf, PAGE_SIZE, "%d\n", val); } @@ -354,14 +340,8 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) return size; } -static ssize_t test_dev_config_show_u8(char *buf, u8 cfg) +static ssize_t test_dev_config_show_u8(char *buf, u8 val) { - u8 val; - - mutex_lock(&test_fw_mutex); - val = cfg; - mutex_unlock(&test_fw_mutex); - return snprintf(buf, PAGE_SIZE, "%u\n", val); } diff --git a/lib/test_hmm.c b/lib/test_hmm.c new file mode 100644 index 000000000000..28528285942c --- /dev/null +++ b/lib/test_hmm.c @@ -0,0 +1,1164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is a module to test the HMM (Heterogeneous Memory Management) + * mirror and zone device private memory migration APIs of the kernel. + * Userspace programs can register with the driver to mirror their own address + * space and can use the device to read/write any valid virtual address. + */ +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/rwsem.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/delay.h> +#include <linux/pagemap.h> +#include <linux/hmm.h> +#include <linux/vmalloc.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <linux/sched/mm.h> +#include <linux/platform_device.h> + +#include "test_hmm_uapi.h" + +#define DMIRROR_NDEVICES 2 +#define DMIRROR_RANGE_FAULT_TIMEOUT 1000 +#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U) +#define DEVMEM_CHUNKS_RESERVE 16 + +static const struct dev_pagemap_ops dmirror_devmem_ops; +static const struct mmu_interval_notifier_ops dmirror_min_ops; +static dev_t dmirror_dev; +static struct page *dmirror_zero_page; + +struct dmirror_device; + +struct dmirror_bounce { + void *ptr; + unsigned long size; + unsigned long addr; + unsigned long cpages; +}; + +#define DPT_XA_TAG_WRITE 3UL + +/* + * Data structure to track address ranges and register for mmu interval + * notifier updates. + */ +struct dmirror_interval { + struct mmu_interval_notifier notifier; + struct dmirror *dmirror; +}; + +/* + * Data attached to the open device file. + * Note that it might be shared after a fork(). + */ +struct dmirror { + struct dmirror_device *mdevice; + struct xarray pt; + struct mmu_interval_notifier notifier; + struct mutex mutex; +}; + +/* + * ZONE_DEVICE pages for migration and simulating device memory. + */ +struct dmirror_chunk { + struct dev_pagemap pagemap; + struct dmirror_device *mdevice; +}; + +/* + * Per device data. 
+ */ +struct dmirror_device { + struct cdev cdevice; + struct hmm_devmem *devmem; + + unsigned int devmem_capacity; + unsigned int devmem_count; + struct dmirror_chunk **devmem_chunks; + struct mutex devmem_lock; /* protects the above */ + + unsigned long calloc; + unsigned long cfree; + struct page *free_pages; + spinlock_t lock; /* protects the above */ +}; + +static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES]; + +static int dmirror_bounce_init(struct dmirror_bounce *bounce, + unsigned long addr, + unsigned long size) +{ + bounce->addr = addr; + bounce->size = size; + bounce->cpages = 0; + bounce->ptr = vmalloc(size); + if (!bounce->ptr) + return -ENOMEM; + return 0; +} + +static void dmirror_bounce_fini(struct dmirror_bounce *bounce) +{ + vfree(bounce->ptr); +} + +static int dmirror_fops_open(struct inode *inode, struct file *filp) +{ + struct cdev *cdev = inode->i_cdev; + struct dmirror *dmirror; + int ret; + + /* Mirror this process address space */ + dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL); + if (dmirror == NULL) + return -ENOMEM; + + dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice); + mutex_init(&dmirror->mutex); + xa_init(&dmirror->pt); + + ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm, + 0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops); + if (ret) { + kfree(dmirror); + return ret; + } + + filp->private_data = dmirror; + return 0; +} + +static int dmirror_fops_release(struct inode *inode, struct file *filp) +{ + struct dmirror *dmirror = filp->private_data; + + mmu_interval_notifier_remove(&dmirror->notifier); + xa_destroy(&dmirror->pt); + kfree(dmirror); + return 0; +} + +static struct dmirror_device *dmirror_page_to_device(struct page *page) + +{ + return container_of(page->pgmap, struct dmirror_chunk, + pagemap)->mdevice; +} + +static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range) +{ + unsigned long *pfns = range->hmm_pfns; + unsigned long pfn; + + for (pfn = (range->start >> PAGE_SHIFT); + pfn < (range->end >> PAGE_SHIFT); + pfn++, pfns++) { + struct page *page; + void *entry; + + /* + * Since we asked for hmm_range_fault() to populate pages, + * it shouldn't return an error entry on success. + */ + WARN_ON(*pfns & HMM_PFN_ERROR); + WARN_ON(!(*pfns & HMM_PFN_VALID)); + + page = hmm_pfn_to_page(*pfns); + WARN_ON(!page); + + entry = page; + if (*pfns & HMM_PFN_WRITE) + entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE); + else if (WARN_ON(range->default_flags & HMM_PFN_WRITE)) + return -EFAULT; + entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); + if (xa_is_err(entry)) + return xa_err(entry); + } + + return 0; +} + +static void dmirror_do_update(struct dmirror *dmirror, unsigned long start, + unsigned long end) +{ + unsigned long pfn; + void *entry; + + /* + * The XArray doesn't hold references to pages since it relies on + * the mmu notifier to clear page pointers when they become stale. + * Therefore, it is OK to just clear the entry. 
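dmirror_do_fault() above builds the device's "page table" as an XArray whose entries are page pointers tagged with the write permission, so a single lookup later recovers both the page and whether the device may write it. Pulled out of context (and without the xa_store() error handling), the tagging helpers are used like this:

	/* store: remember that 'page' is mapped writable at 'pfn' */
	entry = xa_tag_pointer(page, DPT_XA_TAG_WRITE);
	xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);

	/* load: one lookup yields the page and the permission */
	entry = xa_load(&dmirror->pt, pfn);
	page = xa_untag_pointer(entry);
	writable = xa_pointer_tag(entry) == DPT_XA_TAG_WRITE;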
+ */ + xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT, + end >> PAGE_SHIFT) + xa_erase(&dmirror->pt, pfn); +} + +static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct dmirror *dmirror = container_of(mni, struct dmirror, notifier); + + if (mmu_notifier_range_blockable(range)) + mutex_lock(&dmirror->mutex); + else if (!mutex_trylock(&dmirror->mutex)) + return false; + + mmu_interval_set_seq(mni, cur_seq); + dmirror_do_update(dmirror, range->start, range->end); + + mutex_unlock(&dmirror->mutex); + return true; +} + +static const struct mmu_interval_notifier_ops dmirror_min_ops = { + .invalidate = dmirror_interval_invalidate, +}; + +static int dmirror_range_fault(struct dmirror *dmirror, + struct hmm_range *range) +{ + struct mm_struct *mm = dmirror->notifier.mm; + unsigned long timeout = + jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); + int ret; + + while (true) { + if (time_after(jiffies, timeout)) { + ret = -EBUSY; + goto out; + } + + range->notifier_seq = mmu_interval_read_begin(range->notifier); + mmap_read_lock(mm); + ret = hmm_range_fault(range); + mmap_read_unlock(mm); + if (ret) { + if (ret == -EBUSY) + continue; + goto out; + } + + mutex_lock(&dmirror->mutex); + if (mmu_interval_read_retry(range->notifier, + range->notifier_seq)) { + mutex_unlock(&dmirror->mutex); + continue; + } + break; + } + + ret = dmirror_do_fault(dmirror, range); + + mutex_unlock(&dmirror->mutex); +out: + return ret; +} + +static int dmirror_fault(struct dmirror *dmirror, unsigned long start, + unsigned long end, bool write) +{ + struct mm_struct *mm = dmirror->notifier.mm; + unsigned long addr; + unsigned long pfns[64]; + struct hmm_range range = { + .notifier = &dmirror->notifier, + .hmm_pfns = pfns, + .pfn_flags_mask = 0, + .default_flags = + HMM_PFN_REQ_FAULT | (write ? HMM_PFN_REQ_WRITE : 0), + .dev_private_owner = dmirror->mdevice, + }; + int ret = 0; + + /* Since the mm is for the mirrored process, get a reference first. 
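Stripped of the timeout handling, dmirror_range_fault() above is the standard hmm_range_fault() retry loop: sample the interval notifier sequence, fault with the mmap lock held for read, then take the driver lock and start over if an invalidation raced in. Condensed:

	while (true) {
		range->notifier_seq = mmu_interval_read_begin(range->notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(range);
		mmap_read_unlock(mm);
		if (ret == -EBUSY)
			continue;	/* collided with an invalidation, resample */
		if (ret)
			return ret;

		mutex_lock(&dmirror->mutex);
		if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
			mutex_unlock(&dmirror->mutex);
			continue;	/* invalidated after faulting, redo */
		}
		break;			/* results are stable while the mutex is held */
	}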
*/ + if (!mmget_not_zero(mm)) + return 0; + + for (addr = start; addr < end; addr = range.end) { + range.start = addr; + range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); + + ret = dmirror_range_fault(dmirror, &range); + if (ret) + break; + } + + mmput(mm); + return ret; +} + +static int dmirror_do_read(struct dmirror *dmirror, unsigned long start, + unsigned long end, struct dmirror_bounce *bounce) +{ + unsigned long pfn; + void *ptr; + + ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK); + + for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) { + void *entry; + struct page *page; + void *tmp; + + entry = xa_load(&dmirror->pt, pfn); + page = xa_untag_pointer(entry); + if (!page) + return -ENOENT; + + tmp = kmap(page); + memcpy(ptr, tmp, PAGE_SIZE); + kunmap(page); + + ptr += PAGE_SIZE; + bounce->cpages++; + } + + return 0; +} + +static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) +{ + struct dmirror_bounce bounce; + unsigned long start, end; + unsigned long size = cmd->npages << PAGE_SHIFT; + int ret; + + start = cmd->addr; + end = start + size; + if (end < start) + return -EINVAL; + + ret = dmirror_bounce_init(&bounce, start, size); + if (ret) + return ret; + + while (1) { + mutex_lock(&dmirror->mutex); + ret = dmirror_do_read(dmirror, start, end, &bounce); + mutex_unlock(&dmirror->mutex); + if (ret != -ENOENT) + break; + + start = cmd->addr + (bounce.cpages << PAGE_SHIFT); + ret = dmirror_fault(dmirror, start, end, false); + if (ret) + break; + cmd->faults++; + } + + if (ret == 0) { + if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, + bounce.size)) + ret = -EFAULT; + } + cmd->cpages = bounce.cpages; + dmirror_bounce_fini(&bounce); + return ret; +} + +static int dmirror_do_write(struct dmirror *dmirror, unsigned long start, + unsigned long end, struct dmirror_bounce *bounce) +{ + unsigned long pfn; + void *ptr; + + ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK); + + for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) { + void *entry; + struct page *page; + void *tmp; + + entry = xa_load(&dmirror->pt, pfn); + page = xa_untag_pointer(entry); + if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE) + return -ENOENT; + + tmp = kmap(page); + memcpy(tmp, ptr, PAGE_SIZE); + kunmap(page); + + ptr += PAGE_SIZE; + bounce->cpages++; + } + + return 0; +} + +static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) +{ + struct dmirror_bounce bounce; + unsigned long start, end; + unsigned long size = cmd->npages << PAGE_SHIFT; + int ret; + + start = cmd->addr; + end = start + size; + if (end < start) + return -EINVAL; + + ret = dmirror_bounce_init(&bounce, start, size); + if (ret) + return ret; + if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr), + bounce.size)) { + ret = -EFAULT; + goto fini; + } + + while (1) { + mutex_lock(&dmirror->mutex); + ret = dmirror_do_write(dmirror, start, end, &bounce); + mutex_unlock(&dmirror->mutex); + if (ret != -ENOENT) + break; + + start = cmd->addr + (bounce.cpages << PAGE_SHIFT); + ret = dmirror_fault(dmirror, start, end, true); + if (ret) + break; + cmd->faults++; + } + +fini: + cmd->cpages = bounce.cpages; + dmirror_bounce_fini(&bounce); + return ret; +} + +static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, + struct page **ppage) +{ + struct dmirror_chunk *devmem; + struct resource *res; + unsigned long pfn; + unsigned long pfn_first; + unsigned long pfn_last; + void *ptr; + + mutex_lock(&mdevice->devmem_lock); + + if 
(mdevice->devmem_count == mdevice->devmem_capacity) { + struct dmirror_chunk **new_chunks; + unsigned int new_capacity; + + new_capacity = mdevice->devmem_capacity + + DEVMEM_CHUNKS_RESERVE; + new_chunks = krealloc(mdevice->devmem_chunks, + sizeof(new_chunks[0]) * new_capacity, + GFP_KERNEL); + if (!new_chunks) + goto err; + mdevice->devmem_capacity = new_capacity; + mdevice->devmem_chunks = new_chunks; + } + + res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE, + "hmm_dmirror"); + if (IS_ERR(res)) + goto err; + + devmem = kzalloc(sizeof(*devmem), GFP_KERNEL); + if (!devmem) + goto err_release; + + devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; + devmem->pagemap.res = *res; + devmem->pagemap.ops = &dmirror_devmem_ops; + devmem->pagemap.owner = mdevice; + + ptr = memremap_pages(&devmem->pagemap, numa_node_id()); + if (IS_ERR(ptr)) + goto err_free; + + devmem->mdevice = mdevice; + pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT; + pfn_last = pfn_first + + (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT); + mdevice->devmem_chunks[mdevice->devmem_count++] = devmem; + + mutex_unlock(&mdevice->devmem_lock); + + pr_info("added new %u MB chunk (total %u chunks, %u MB) PFNs [0x%lx 0x%lx)\n", + DEVMEM_CHUNK_SIZE / (1024 * 1024), + mdevice->devmem_count, + mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)), + pfn_first, pfn_last); + + spin_lock(&mdevice->lock); + for (pfn = pfn_first; pfn < pfn_last; pfn++) { + struct page *page = pfn_to_page(pfn); + + page->zone_device_data = mdevice->free_pages; + mdevice->free_pages = page; + } + if (ppage) { + *ppage = mdevice->free_pages; + mdevice->free_pages = (*ppage)->zone_device_data; + mdevice->calloc++; + } + spin_unlock(&mdevice->lock); + + return true; + +err_free: + kfree(devmem); +err_release: + release_mem_region(devmem->pagemap.res.start, + resource_size(&devmem->pagemap.res)); +err: + mutex_unlock(&mdevice->devmem_lock); + return false; +} + +static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) +{ + struct page *dpage = NULL; + struct page *rpage; + + /* + * This is a fake device so we alloc real system memory to store + * our device memory. + */ + rpage = alloc_page(GFP_HIGHUSER); + if (!rpage) + return NULL; + + spin_lock(&mdevice->lock); + + if (mdevice->free_pages) { + dpage = mdevice->free_pages; + mdevice->free_pages = dpage->zone_device_data; + mdevice->calloc++; + spin_unlock(&mdevice->lock); + } else { + spin_unlock(&mdevice->lock); + if (!dmirror_allocate_chunk(mdevice, &dpage)) + goto error; + } + + dpage->zone_device_data = rpage; + get_page(dpage); + lock_page(dpage); + return dpage; + +error: + __free_page(rpage); + return NULL; +} + +static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, + struct dmirror *dmirror) +{ + struct dmirror_device *mdevice = dmirror->mdevice; + const unsigned long *src = args->src; + unsigned long *dst = args->dst; + unsigned long addr; + + for (addr = args->start; addr < args->end; addr += PAGE_SIZE, + src++, dst++) { + struct page *spage; + struct page *dpage; + struct page *rpage; + + if (!(*src & MIGRATE_PFN_MIGRATE)) + continue; + + /* + * Note that spage might be NULL which is OK since it is an + * unallocated pte_none() or read-only zero page. + */ + spage = migrate_pfn_to_page(*src); + + /* + * Don't migrate device private pages from our own driver or + * others. For our own we would do a device private memory copy + * not a migration and for others, we would need to fault the + * other device's page into system memory first. 
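Condensed, dmirror_allocate_chunk() above creates ZONE_DEVICE private memory in three steps; error handling and the free-page list bookkeeping are left out of this sketch:

	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
				      "hmm_dmirror");

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *res;
	devmem->pagemap.ops = &dmirror_devmem_ops;	/* ->page_free, ->migrate_to_ram */
	devmem->pagemap.owner = mdevice;	/* matched against dev_private_owner later */

	ptr = memremap_pages(&devmem->pagemap, numa_node_id());
	/* struct pages now exist for the chunk; thread them onto mdevice->free_pages */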
+ */ + if (spage && is_zone_device_page(spage)) + continue; + + dpage = dmirror_devmem_alloc_page(mdevice); + if (!dpage) + continue; + + rpage = dpage->zone_device_data; + if (spage) + copy_highpage(rpage, spage); + else + clear_highpage(rpage); + + /* + * Normally, a device would use the page->zone_device_data to + * point to the mirror but here we use it to hold the page for + * the simulated device memory and that page holds the pointer + * to the mirror. + */ + rpage->zone_device_data = dmirror; + + *dst = migrate_pfn(page_to_pfn(dpage)) | + MIGRATE_PFN_LOCKED; + if ((*src & MIGRATE_PFN_WRITE) || + (!spage && args->vma->vm_flags & VM_WRITE)) + *dst |= MIGRATE_PFN_WRITE; + } +} + +static int dmirror_migrate_finalize_and_map(struct migrate_vma *args, + struct dmirror *dmirror) +{ + unsigned long start = args->start; + unsigned long end = args->end; + const unsigned long *src = args->src; + const unsigned long *dst = args->dst; + unsigned long pfn; + + /* Map the migrated pages into the device's page tables. */ + mutex_lock(&dmirror->mutex); + + for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, + src++, dst++) { + struct page *dpage; + void *entry; + + if (!(*src & MIGRATE_PFN_MIGRATE)) + continue; + + dpage = migrate_pfn_to_page(*dst); + if (!dpage) + continue; + + /* + * Store the page that holds the data so the page table + * doesn't have to deal with ZONE_DEVICE private pages. + */ + entry = dpage->zone_device_data; + if (*dst & MIGRATE_PFN_WRITE) + entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE); + entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); + if (xa_is_err(entry)) { + mutex_unlock(&dmirror->mutex); + return xa_err(entry); + } + } + + mutex_unlock(&dmirror->mutex); + return 0; +} + +static int dmirror_migrate(struct dmirror *dmirror, + struct hmm_dmirror_cmd *cmd) +{ + unsigned long start, end, addr; + unsigned long size = cmd->npages << PAGE_SHIFT; + struct mm_struct *mm = dmirror->notifier.mm; + struct vm_area_struct *vma; + unsigned long src_pfns[64]; + unsigned long dst_pfns[64]; + struct dmirror_bounce bounce; + struct migrate_vma args; + unsigned long next; + int ret; + + start = cmd->addr; + end = start + size; + if (end < start) + return -EINVAL; + + /* Since the mm is for the mirrored process, get a reference first. */ + if (!mmget_not_zero(mm)) + return -EINVAL; + + mmap_read_lock(mm); + for (addr = start; addr < end; addr = next) { + vma = find_vma(mm, addr); + if (!vma || addr < vma->vm_start || + !(vma->vm_flags & VM_READ)) { + ret = -EINVAL; + goto out; + } + next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); + if (next > vma->vm_end) + next = vma->vm_end; + + args.vma = vma; + args.src = src_pfns; + args.dst = dst_pfns; + args.start = addr; + args.end = next; + args.src_owner = NULL; + ret = migrate_vma_setup(&args); + if (ret) + goto out; + + dmirror_migrate_alloc_and_copy(&args, dmirror); + migrate_vma_pages(&args); + dmirror_migrate_finalize_and_map(&args, dmirror); + migrate_vma_finalize(&args); + } + mmap_read_unlock(mm); + mmput(mm); + + /* Return the migrated data for verification. 
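Each pass of the loop just above runs the usual migrate_vma sequence, with the driver supplying only the allocate-and-copy step in the middle (roughly):

	migrate_vma_setup(&args);			/* collect and isolate source pages */
	dmirror_migrate_alloc_and_copy(&args, dmirror);	/* allocate device pages, copy data */
	migrate_vma_pages(&args);			/* switch the CPU mappings over */
	dmirror_migrate_finalize_and_map(&args, dmirror); /* update the device's mirror */
	migrate_vma_finalize(&args);			/* drop and unlock the source pages */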
*/ + ret = dmirror_bounce_init(&bounce, start, size); + if (ret) + return ret; + mutex_lock(&dmirror->mutex); + ret = dmirror_do_read(dmirror, start, end, &bounce); + mutex_unlock(&dmirror->mutex); + if (ret == 0) { + if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, + bounce.size)) + ret = -EFAULT; + } + cmd->cpages = bounce.cpages; + dmirror_bounce_fini(&bounce); + return ret; + +out: + mmap_read_unlock(mm); + mmput(mm); + return ret; +} + +static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range, + unsigned char *perm, unsigned long entry) +{ + struct page *page; + + if (entry & HMM_PFN_ERROR) { + *perm = HMM_DMIRROR_PROT_ERROR; + return; + } + if (!(entry & HMM_PFN_VALID)) { + *perm = HMM_DMIRROR_PROT_NONE; + return; + } + + page = hmm_pfn_to_page(entry); + if (is_device_private_page(page)) { + /* Is the page migrated to this device or some other? */ + if (dmirror->mdevice == dmirror_page_to_device(page)) + *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL; + else + *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE; + } else if (is_zero_pfn(page_to_pfn(page))) + *perm = HMM_DMIRROR_PROT_ZERO; + else + *perm = HMM_DMIRROR_PROT_NONE; + if (entry & HMM_PFN_WRITE) + *perm |= HMM_DMIRROR_PROT_WRITE; + else + *perm |= HMM_DMIRROR_PROT_READ; +} + +static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct dmirror_interval *dmi = + container_of(mni, struct dmirror_interval, notifier); + struct dmirror *dmirror = dmi->dmirror; + + if (mmu_notifier_range_blockable(range)) + mutex_lock(&dmirror->mutex); + else if (!mutex_trylock(&dmirror->mutex)) + return false; + + /* + * Snapshots only need to set the sequence number since any + * invalidation in the interval invalidates the whole snapshot. 
+ */ + mmu_interval_set_seq(mni, cur_seq); + + mutex_unlock(&dmirror->mutex); + return true; +} + +static const struct mmu_interval_notifier_ops dmirror_mrn_ops = { + .invalidate = dmirror_snapshot_invalidate, +}; + +static int dmirror_range_snapshot(struct dmirror *dmirror, + struct hmm_range *range, + unsigned char *perm) +{ + struct mm_struct *mm = dmirror->notifier.mm; + struct dmirror_interval notifier; + unsigned long timeout = + jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); + unsigned long i; + unsigned long n; + int ret = 0; + + notifier.dmirror = dmirror; + range->notifier = ¬ifier.notifier; + + ret = mmu_interval_notifier_insert(range->notifier, mm, + range->start, range->end - range->start, + &dmirror_mrn_ops); + if (ret) + return ret; + + while (true) { + if (time_after(jiffies, timeout)) { + ret = -EBUSY; + goto out; + } + + range->notifier_seq = mmu_interval_read_begin(range->notifier); + + mmap_read_lock(mm); + ret = hmm_range_fault(range); + mmap_read_unlock(mm); + if (ret) { + if (ret == -EBUSY) + continue; + goto out; + } + + mutex_lock(&dmirror->mutex); + if (mmu_interval_read_retry(range->notifier, + range->notifier_seq)) { + mutex_unlock(&dmirror->mutex); + continue; + } + break; + } + + n = (range->end - range->start) >> PAGE_SHIFT; + for (i = 0; i < n; i++) + dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]); + + mutex_unlock(&dmirror->mutex); +out: + mmu_interval_notifier_remove(range->notifier); + return ret; +} + +static int dmirror_snapshot(struct dmirror *dmirror, + struct hmm_dmirror_cmd *cmd) +{ + struct mm_struct *mm = dmirror->notifier.mm; + unsigned long start, end; + unsigned long size = cmd->npages << PAGE_SHIFT; + unsigned long addr; + unsigned long next; + unsigned long pfns[64]; + unsigned char perm[64]; + char __user *uptr; + struct hmm_range range = { + .hmm_pfns = pfns, + .dev_private_owner = dmirror->mdevice, + }; + int ret = 0; + + start = cmd->addr; + end = start + size; + if (end < start) + return -EINVAL; + + /* Since the mm is for the mirrored process, get a reference first. */ + if (!mmget_not_zero(mm)) + return -EINVAL; + + /* + * Register a temporary notifier to detect invalidations even if it + * overlaps with other mmu_interval_notifiers. 
+ */ + uptr = u64_to_user_ptr(cmd->ptr); + for (addr = start; addr < end; addr = next) { + unsigned long n; + + next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); + range.start = addr; + range.end = next; + + ret = dmirror_range_snapshot(dmirror, &range, perm); + if (ret) + break; + + n = (range.end - range.start) >> PAGE_SHIFT; + if (copy_to_user(uptr, perm, n)) { + ret = -EFAULT; + break; + } + + cmd->cpages += n; + uptr += n; + } + mmput(mm); + + return ret; +} + +static long dmirror_fops_unlocked_ioctl(struct file *filp, + unsigned int command, + unsigned long arg) +{ + void __user *uarg = (void __user *)arg; + struct hmm_dmirror_cmd cmd; + struct dmirror *dmirror; + int ret; + + dmirror = filp->private_data; + if (!dmirror) + return -EINVAL; + + if (copy_from_user(&cmd, uarg, sizeof(cmd))) + return -EFAULT; + + if (cmd.addr & ~PAGE_MASK) + return -EINVAL; + if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT))) + return -EINVAL; + + cmd.cpages = 0; + cmd.faults = 0; + + switch (command) { + case HMM_DMIRROR_READ: + ret = dmirror_read(dmirror, &cmd); + break; + + case HMM_DMIRROR_WRITE: + ret = dmirror_write(dmirror, &cmd); + break; + + case HMM_DMIRROR_MIGRATE: + ret = dmirror_migrate(dmirror, &cmd); + break; + + case HMM_DMIRROR_SNAPSHOT: + ret = dmirror_snapshot(dmirror, &cmd); + break; + + default: + return -EINVAL; + } + if (ret) + return ret; + + if (copy_to_user(uarg, &cmd, sizeof(cmd))) + return -EFAULT; + + return 0; +} + +static const struct file_operations dmirror_fops = { + .open = dmirror_fops_open, + .release = dmirror_fops_release, + .unlocked_ioctl = dmirror_fops_unlocked_ioctl, + .llseek = default_llseek, + .owner = THIS_MODULE, +}; + +static void dmirror_devmem_free(struct page *page) +{ + struct page *rpage = page->zone_device_data; + struct dmirror_device *mdevice; + + if (rpage) + __free_page(rpage); + + mdevice = dmirror_page_to_device(page); + + spin_lock(&mdevice->lock); + mdevice->cfree++; + page->zone_device_data = mdevice->free_pages; + mdevice->free_pages = page; + spin_unlock(&mdevice->lock); +} + +static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args, + struct dmirror_device *mdevice) +{ + const unsigned long *src = args->src; + unsigned long *dst = args->dst; + unsigned long start = args->start; + unsigned long end = args->end; + unsigned long addr; + + for (addr = start; addr < end; addr += PAGE_SIZE, + src++, dst++) { + struct page *dpage, *spage; + + spage = migrate_pfn_to_page(*src); + if (!spage || !(*src & MIGRATE_PFN_MIGRATE)) + continue; + spage = spage->zone_device_data; + + dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); + if (!dpage) + continue; + + lock_page(dpage); + copy_highpage(dpage, spage); + *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + if (*src & MIGRATE_PFN_WRITE) + *dst |= MIGRATE_PFN_WRITE; + } + return 0; +} + +static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args, + struct dmirror *dmirror) +{ + /* Invalidate the device's page table mapping. 
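All four commands handled by dmirror_fops_unlocked_ioctl() above are driven from userspace through the single hmm_dmirror_cmd structure declared in test_hmm_uapi.h further down. A read of one page of the caller's own address space looks roughly like the fragment below; the device node path is illustrative, and error handling plus the usual <fcntl.h>/<sys/ioctl.h> includes are omitted:

	struct hmm_dmirror_cmd cmd = {
		.addr	= (uintptr_t)buf,	/* page-aligned address to mirror */
		.ptr	= (uintptr_t)out,	/* where the driver copies the data */
		.npages	= 1,
	};
	int fd = open("/dev/hmm_dmirror0", O_RDWR);

	if (ioctl(fd, HMM_DMIRROR_READ, &cmd) == 0)
		printf("copied %llu page(s) after %llu device fault(s)\n",
		       (unsigned long long)cmd.cpages,
		       (unsigned long long)cmd.faults);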
*/ + mutex_lock(&dmirror->mutex); + dmirror_do_update(dmirror, args->start, args->end); + mutex_unlock(&dmirror->mutex); +} + +static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) +{ + struct migrate_vma args; + unsigned long src_pfns; + unsigned long dst_pfns; + struct page *rpage; + struct dmirror *dmirror; + vm_fault_t ret; + + /* + * Normally, a device would use the page->zone_device_data to point to + * the mirror but here we use it to hold the page for the simulated + * device memory and that page holds the pointer to the mirror. + */ + rpage = vmf->page->zone_device_data; + dmirror = rpage->zone_device_data; + + /* FIXME demonstrate how we can adjust migrate range */ + args.vma = vmf->vma; + args.start = vmf->address; + args.end = args.start + PAGE_SIZE; + args.src = &src_pfns; + args.dst = &dst_pfns; + args.src_owner = dmirror->mdevice; + + if (migrate_vma_setup(&args)) + return VM_FAULT_SIGBUS; + + ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice); + if (ret) + return ret; + migrate_vma_pages(&args); + dmirror_devmem_fault_finalize_and_map(&args, dmirror); + migrate_vma_finalize(&args); + return 0; +} + +static const struct dev_pagemap_ops dmirror_devmem_ops = { + .page_free = dmirror_devmem_free, + .migrate_to_ram = dmirror_devmem_fault, +}; + +static int dmirror_device_init(struct dmirror_device *mdevice, int id) +{ + dev_t dev; + int ret; + + dev = MKDEV(MAJOR(dmirror_dev), id); + mutex_init(&mdevice->devmem_lock); + spin_lock_init(&mdevice->lock); + + cdev_init(&mdevice->cdevice, &dmirror_fops); + mdevice->cdevice.owner = THIS_MODULE; + ret = cdev_add(&mdevice->cdevice, dev, 1); + if (ret) + return ret; + + /* Build a list of free ZONE_DEVICE private struct pages */ + dmirror_allocate_chunk(mdevice, NULL); + + return 0; +} + +static void dmirror_device_remove(struct dmirror_device *mdevice) +{ + unsigned int i; + + if (mdevice->devmem_chunks) { + for (i = 0; i < mdevice->devmem_count; i++) { + struct dmirror_chunk *devmem = + mdevice->devmem_chunks[i]; + + memunmap_pages(&devmem->pagemap); + release_mem_region(devmem->pagemap.res.start, + resource_size(&devmem->pagemap.res)); + kfree(devmem); + } + kfree(mdevice->devmem_chunks); + } + + cdev_del(&mdevice->cdevice); +} + +static int __init hmm_dmirror_init(void) +{ + int ret; + int id; + + ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES, + "HMM_DMIRROR"); + if (ret) + goto err_unreg; + + for (id = 0; id < DMIRROR_NDEVICES; id++) { + ret = dmirror_device_init(dmirror_devices + id, id); + if (ret) + goto err_chrdev; + } + + /* + * Allocate a zero page to simulate a reserved page of device private + * memory which is always zero. The zero_pfn page isn't used just to + * make the code here simpler (i.e., we need a struct page for it). + */ + dmirror_zero_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); + if (!dmirror_zero_page) { + ret = -ENOMEM; + goto err_chrdev; + } + + pr_info("HMM test module loaded. 
This is only for testing HMM.\n"); + return 0; + +err_chrdev: + while (--id >= 0) + dmirror_device_remove(dmirror_devices + id); + unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES); +err_unreg: + return ret; +} + +static void __exit hmm_dmirror_exit(void) +{ + int id; + + if (dmirror_zero_page) + __free_page(dmirror_zero_page); + for (id = 0; id < DMIRROR_NDEVICES; id++) + dmirror_device_remove(dmirror_devices + id); + unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES); +} + +module_init(hmm_dmirror_init); +module_exit(hmm_dmirror_exit); +MODULE_LICENSE("GPL"); diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h new file mode 100644 index 000000000000..67b3b2e6ff5d --- /dev/null +++ b/lib/test_hmm_uapi.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * This is a module to test the HMM (Heterogeneous Memory Management) API + * of the kernel. It allows a userspace program to expose its entire address + * space through the HMM test module device file. + */ +#ifndef _LIB_TEST_HMM_UAPI_H +#define _LIB_TEST_HMM_UAPI_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +/* + * Structure to pass to the HMM test driver to mimic a device accessing + * system memory and ZONE_DEVICE private memory through device page tables. + * + * @addr: (in) user address the device will read/write + * @ptr: (in) user address where device data is copied to/from + * @npages: (in) number of pages to read/write + * @cpages: (out) number of pages copied + * @faults: (out) number of device page faults seen + */ +struct hmm_dmirror_cmd { + __u64 addr; + __u64 ptr; + __u64 npages; + __u64 cpages; + __u64 faults; +}; + +/* Expose the address space of the calling process through hmm device file */ +#define HMM_DMIRROR_READ _IOWR('H', 0x00, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd) + +/* + * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT. + * HMM_DMIRROR_PROT_ERROR: no valid mirror PTE for this page + * HMM_DMIRROR_PROT_NONE: unpopulated PTE or PTE with no access + * HMM_DMIRROR_PROT_READ: read-only PTE + * HMM_DMIRROR_PROT_WRITE: read/write PTE + * HMM_DMIRROR_PROT_ZERO: special read-only zero page + * HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL: Migrated device private page on the + * device the ioctl() is made + * HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE: Migrated device private page on some + * other device + */ +enum { + HMM_DMIRROR_PROT_ERROR = 0xFF, + HMM_DMIRROR_PROT_NONE = 0x00, + HMM_DMIRROR_PROT_READ = 0x01, + HMM_DMIRROR_PROT_WRITE = 0x02, + HMM_DMIRROR_PROT_ZERO = 0x10, + HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20, + HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30, +}; + +#endif /* _LIB_TEST_HMM_UAPI_H */ diff --git a/lib/test_kasan.c b/lib/test_kasan.c index e3087d90e00d..dc2c6a51d11a 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -24,6 +24,14 @@ #include <asm/page.h> /* + * We assign some test results to these globals to make sure the tests + * are not eliminated as dead code. + */ + +int kasan_int_result; +void *kasan_ptr_result; + +/* * Note: test functions are marked noinline so that their names appear in * reports. 
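The two globals added above give the string, memory and bitops test calls an observable side effect: without a sink, the compiler is free to prove a call such as strlen(ptr) dead and delete the very out-of-bounds access the test is meant to provoke. The change is simply:

	/* before: result unused, the call may be optimized away */
	strlen(ptr);

	/* after: result escapes to a global, so the access must happen */
	kasan_int_result = strlen(ptr);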
*/ @@ -622,7 +630,7 @@ static noinline void __init kasan_memchr(void) if (!ptr) return; - memchr(ptr, '1', size + 1); + kasan_ptr_result = memchr(ptr, '1', size + 1); kfree(ptr); } @@ -638,7 +646,7 @@ static noinline void __init kasan_memcmp(void) return; memset(arr, 0, sizeof(arr)); - memcmp(ptr, arr, size+1); + kasan_int_result = memcmp(ptr, arr, size + 1); kfree(ptr); } @@ -661,22 +669,22 @@ static noinline void __init kasan_strings(void) * will likely point to zeroed byte. */ ptr += 16; - strchr(ptr, '1'); + kasan_ptr_result = strchr(ptr, '1'); pr_info("use-after-free in strrchr\n"); - strrchr(ptr, '1'); + kasan_ptr_result = strrchr(ptr, '1'); pr_info("use-after-free in strcmp\n"); - strcmp(ptr, "2"); + kasan_int_result = strcmp(ptr, "2"); pr_info("use-after-free in strncmp\n"); - strncmp(ptr, "2", 1); + kasan_int_result = strncmp(ptr, "2", 1); pr_info("use-after-free in strlen\n"); - strlen(ptr); + kasan_int_result = strlen(ptr); pr_info("use-after-free in strnlen\n"); - strnlen(ptr, 1); + kasan_int_result = strnlen(ptr, 1); } static noinline void __init kasan_bitops(void) @@ -743,11 +751,12 @@ static noinline void __init kasan_bitops(void) __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); pr_info("out-of-bounds in test_bit\n"); - (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); + kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); #if defined(clear_bit_unlock_is_negative_byte) pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n"); - clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits); + kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG + + BITS_PER_BYTE, bits); #endif kfree(bits); } diff --git a/lib/test_linear_ranges.c b/lib/test_linear_ranges.c new file mode 100644 index 000000000000..676e0b8abcdd --- /dev/null +++ b/lib/test_linear_ranges.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test for the linear_ranges helper. + * + * Copyright (C) 2020, ROHM Semiconductors. + * Author: Matti Vaittinen <matti.vaittien@fi.rohmeurope.com> + */ +#include <kunit/test.h> + +#include <linux/linear_range.h> + +/* First things first. I deeply dislike unit-tests. I have seen all the hell + * breaking loose when people who think the unit tests are "the silver bullet" + * to kill bugs get to decide how a company should implement testing strategy... + * + * Believe me, it may get _really_ ridiculous. It is tempting to think that + * walking through all the possible execution branches will nail down 100% of + * bugs. This may lead to ideas about demands to get certain % of "test + * coverage" - measured as line coverage. And that is one of the worst things + * you can do. + * + * Ask people to provide line coverage and they do. I've seen clever tools + * which generate test cases to test the existing functions - and by default + * these tools expect code to be correct and just generate checks which are + * passing when ran against current code-base. Run this generator and you'll get + * tests that do not test code is correct but just verify nothing changes. + * Problem is that testing working code is pointless. And if it is not + * working, your test must not assume it is working. You won't catch any bugs + * by such tests. What you can do is to generate a huge amount of tests. + * Especially if you were are asked to proivde 100% line-coverage x_x. So what + * does these tests - which are not finding any bugs now - do? + * + * They add inertia to every future development. 
I think it was Terry Pratchet + * who wrote someone having same impact as thick syrup has to chronometre. + * Excessive amount of unit-tests have this effect to development. If you do + * actually find _any_ bug from code in such environment and try fixing it... + * ...chances are you also need to fix the test cases. In sunny day you fix one + * test. But I've done refactoring which resulted 500+ broken tests (which had + * really zero value other than proving to managers that we do do "quality")... + * + * After this being said - there are situations where UTs can be handy. If you + * have algorithms which take some input and should produce output - then you + * can implement few, carefully selected simple UT-cases which test this. I've + * previously used this for example for netlink and device-tree data parsing + * functions. Feed some data examples to functions and verify the output is as + * expected. I am not covering all the cases but I will see the logic should be + * working. + * + * Here we also do some minor testing. I don't want to go through all branches + * or test more or less obvious things - but I want to see the main logic is + * working. And I definitely don't want to add 500+ test cases that break when + * some simple fix is done x_x. So - let's only add few, well selected tests + * which ensure as much logic is good as possible. + */ + +/* + * Test Range 1: + * selectors: 2 3 4 5 6 + * values (5): 10 20 30 40 50 + * + * Test Range 2: + * selectors: 7 8 9 10 + * values (4): 100 150 200 250 + */ + +#define RANGE1_MIN 10 +#define RANGE1_MIN_SEL 2 +#define RANGE1_STEP 10 + +/* 2, 3, 4, 5, 6 */ +static const unsigned int range1_sels[] = { RANGE1_MIN_SEL, RANGE1_MIN_SEL + 1, + RANGE1_MIN_SEL + 2, + RANGE1_MIN_SEL + 3, + RANGE1_MIN_SEL + 4 }; +/* 10, 20, 30, 40, 50 */ +static const unsigned int range1_vals[] = { RANGE1_MIN, RANGE1_MIN + + RANGE1_STEP, + RANGE1_MIN + RANGE1_STEP * 2, + RANGE1_MIN + RANGE1_STEP * 3, + RANGE1_MIN + RANGE1_STEP * 4 }; + +#define RANGE2_MIN 100 +#define RANGE2_MIN_SEL 7 +#define RANGE2_STEP 50 + +/* 7, 8, 9, 10 */ +static const unsigned int range2_sels[] = { RANGE2_MIN_SEL, RANGE2_MIN_SEL + 1, + RANGE2_MIN_SEL + 2, + RANGE2_MIN_SEL + 3 }; +/* 100, 150, 200, 250 */ +static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN + + RANGE2_STEP, + RANGE2_MIN + RANGE2_STEP * 2, + RANGE2_MIN + RANGE2_STEP * 3 }; + +#define RANGE1_NUM_VALS (ARRAY_SIZE(range1_vals)) +#define RANGE2_NUM_VALS (ARRAY_SIZE(range2_vals)) +#define RANGE_NUM_VALS (RANGE1_NUM_VALS + RANGE2_NUM_VALS) + +#define RANGE1_MAX_SEL (RANGE1_MIN_SEL + RANGE1_NUM_VALS - 1) +#define RANGE1_MAX_VAL (range1_vals[RANGE1_NUM_VALS - 1]) + +#define RANGE2_MAX_SEL (RANGE2_MIN_SEL + RANGE2_NUM_VALS - 1) +#define RANGE2_MAX_VAL (range2_vals[RANGE2_NUM_VALS - 1]) + +#define SMALLEST_SEL RANGE1_MIN_SEL +#define SMALLEST_VAL RANGE1_MIN + +static struct linear_range testr[] = { + { + .min = RANGE1_MIN, + .min_sel = RANGE1_MIN_SEL, + .max_sel = RANGE1_MAX_SEL, + .step = RANGE1_STEP, + }, { + .min = RANGE2_MIN, + .min_sel = RANGE2_MIN_SEL, + .max_sel = RANGE2_MAX_SEL, + .step = RANGE2_STEP + }, +}; + +static void range_test_get_value(struct kunit *test) +{ + int ret, i; + unsigned int sel, val; + + for (i = 0; i < RANGE1_NUM_VALS; i++) { + sel = range1_sels[i]; + ret = linear_range_get_value_array(&testr[0], 2, sel, &val); + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_EQ(test, val, range1_vals[i]); + } + for (i = 0; i < RANGE2_NUM_VALS; i++) { + sel = range2_sels[i]; + ret = 
linear_range_get_value_array(&testr[0], 2, sel, &val); + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_EQ(test, val, range2_vals[i]); + } + ret = linear_range_get_value_array(&testr[0], 2, sel + 1, &val); + KUNIT_EXPECT_NE(test, 0, ret); +} + +static void range_test_get_selector_high(struct kunit *test) +{ + int ret, i; + unsigned int sel; + bool found; + + for (i = 0; i < RANGE1_NUM_VALS; i++) { + ret = linear_range_get_selector_high(&testr[0], range1_vals[i], + &sel, &found); + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_EQ(test, sel, range1_sels[i]); + KUNIT_EXPECT_TRUE(test, found); + } + + ret = linear_range_get_selector_high(&testr[0], RANGE1_MAX_VAL + 1, + &sel, &found); + KUNIT_EXPECT_LE(test, ret, 0); + + ret = linear_range_get_selector_high(&testr[0], RANGE1_MIN - 1, + &sel, &found); + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_FALSE(test, found); + KUNIT_EXPECT_EQ(test, sel, range1_sels[0]); +} + +static void range_test_get_value_amount(struct kunit *test) +{ + int ret; + + ret = linear_range_values_in_range_array(&testr[0], 2); + KUNIT_EXPECT_EQ(test, (int)RANGE_NUM_VALS, ret); +} + +static void range_test_get_selector_low(struct kunit *test) +{ + int i, ret; + unsigned int sel; + bool found; + + for (i = 0; i < RANGE1_NUM_VALS; i++) { + ret = linear_range_get_selector_low_array(&testr[0], 2, + range1_vals[i], &sel, + &found); + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_EQ(test, sel, range1_sels[i]); + KUNIT_EXPECT_TRUE(test, found); + } + for (i = 0; i < RANGE2_NUM_VALS; i++) { + ret = linear_range_get_selector_low_array(&testr[0], 2, + range2_vals[i], &sel, + &found); + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_EQ(test, sel, range2_sels[i]); + KUNIT_EXPECT_TRUE(test, found); + } + + /* + * Seek value greater than range max => get_selector_*_low should + * return Ok - but set found to false as value is not in range + */ + ret = linear_range_get_selector_low_array(&testr[0], 2, + range2_vals[RANGE2_NUM_VALS - 1] + 1, + &sel, &found); + + KUNIT_EXPECT_EQ(test, 0, ret); + KUNIT_EXPECT_EQ(test, sel, range2_sels[RANGE2_NUM_VALS - 1]); + KUNIT_EXPECT_FALSE(test, found); +} + +static struct kunit_case range_test_cases[] = { + KUNIT_CASE(range_test_get_value_amount), + KUNIT_CASE(range_test_get_selector_high), + KUNIT_CASE(range_test_get_selector_low), + KUNIT_CASE(range_test_get_value), + {}, +}; + +static struct kunit_suite range_test_module = { + .name = "linear-ranges-test", + .test_cases = range_test_cases, +}; + +kunit_test_suites(&range_test_module); + +MODULE_LICENSE("GPL"); diff --git a/lib/test_lockup.c b/lib/test_lockup.c index ea09ca335b21..f258743a0d83 100644 --- a/lib/test_lockup.c +++ b/lib/test_lockup.c @@ -103,7 +103,7 @@ MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls"); static bool lock_mmap_sem; module_param(lock_mmap_sem, bool, 0400); -MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_sem: block procfs interfaces"); +MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces"); static unsigned long lock_rwsem_ptr; module_param_unsafe(lock_rwsem_ptr, ulong, 0400); @@ -142,7 +142,7 @@ module_param(reallocate_pages, bool, 0400); MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations"); struct file *test_file; -struct inode *test_inode; +static struct inode *test_inode; static char test_file_path[256]; module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400); MODULE_PARM_DESC(file_path, "file path to test"); @@ -191,11 +191,11 @@ static void test_lock(bool master, bool verbose) 
if (lock_mmap_sem && master) { if (verbose) - pr_notice("lock mmap_sem pid=%d\n", main_task->pid); + pr_notice("lock mmap_lock pid=%d\n", main_task->pid); if (lock_read) - down_read(&main_task->mm->mmap_sem); + mmap_read_lock(main_task->mm); else - down_write(&main_task->mm->mmap_sem); + mmap_write_lock(main_task->mm); } if (test_disable_irq) @@ -276,11 +276,11 @@ static void test_unlock(bool master, bool verbose) if (lock_mmap_sem && master) { if (lock_read) - up_read(&main_task->mm->mmap_sem); + mmap_read_unlock(main_task->mm); else - up_write(&main_task->mm->mmap_sem); + mmap_write_unlock(main_task->mm); if (verbose) - pr_notice("unlock mmap_sem pid=%d\n", main_task->pid); + pr_notice("unlock mmap_lock pid=%d\n", main_task->pid); } if (lock_rwsem_ptr && master) { @@ -505,7 +505,7 @@ static int __init test_lockup_init(void) } if (lock_mmap_sem && !main_task->mm) { - pr_err("no mm to lock mmap_sem\n"); + pr_err("no mm to lock mmap_lock\n"); return -EINVAL; } diff --git a/lib/test_printf.c b/lib/test_printf.c index 2d9f520d2f27..7ac87f18a10f 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -214,6 +214,7 @@ test_string(void) #define PTR_STR "ffff0123456789ab" #define PTR_VAL_NO_CRNG "(____ptrval____)" #define ZEROS "00000000" /* hex 32 zero bits */ +#define ONES "ffffffff" /* hex 32 one bits */ static int __init plain_format(void) @@ -245,6 +246,7 @@ plain_format(void) #define PTR_STR "456789ab" #define PTR_VAL_NO_CRNG "(ptrval)" #define ZEROS "" +#define ONES "" static int __init plain_format(void) @@ -330,14 +332,28 @@ test_hashed(const char *fmt, const void *p) test(buf, fmt, p); } +/* + * NULL pointers aren't hashed. + */ static void __init null_pointer(void) { - test_hashed("%p", NULL); + test(ZEROS "00000000", "%p", NULL); test(ZEROS "00000000", "%px", NULL); test("(null)", "%pE", NULL); } +/* + * Error pointers aren't hashed. 
+ */ +static void __init +error_pointer(void) +{ + test(ONES "fffffff5", "%p", ERR_PTR(-11)); + test(ONES "fffffff5", "%px", ERR_PTR(-11)); + test("(efault)", "%pE", ERR_PTR(-11)); +} + #define PTR_INVALID ((void *)0x000000ab) static void __init @@ -478,7 +494,7 @@ struct_va_format(void) } static void __init -struct_rtc_time(void) +time_and_date(void) { /* 1543210543 */ const struct rtc_time tm = { @@ -489,14 +505,21 @@ struct_rtc_time(void) .tm_mon = 10, .tm_year = 118, }; + /* 2019-01-04T15:32:23 */ + time64_t t = 1546615943; - test("(%ptR?)", "%pt", &tm); + test("(%pt?)", "%pt", &tm); test("2018-11-26T05:35:43", "%ptR", &tm); test("0118-10-26T05:35:43", "%ptRr", &tm); test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm); test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm); test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm); test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm); + + test("2019-01-04T15:32:23", "%ptT", &t); + test("0119-00-04T15:32:23", "%ptTr", &t); + test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t); + test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t); } static void __init @@ -621,7 +644,9 @@ static void __init fwnode_pointer(void) test(second_name, "%pfwP", software_node_fwnode(&softnodes[1])); test(third_name, "%pfwP", software_node_fwnode(&softnodes[2])); - software_node_unregister_nodes(softnodes); + software_node_unregister(&softnodes[2]); + software_node_unregister(&softnodes[1]); + software_node_unregister(&softnodes[0]); } static void __init @@ -649,6 +674,7 @@ test_pointer(void) { plain(); null_pointer(); + error_pointer(); invalid_pointer(); symbol_ptr(); kernel_ptr(); @@ -661,7 +687,7 @@ test_pointer(void) uuid(); dentry(); struct_va_format(); - struct_rtc_time(); + time_and_date(); struct_clk(); bitmap(); netdev_features(); diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index 566dad3f4196..98bc92a91662 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -44,6 +44,8 @@ struct test_sysctl_data { int int_0002; int int_0003[4]; + int boot_int; + unsigned int uint_0001; char string_0001[65]; @@ -61,6 +63,8 @@ static struct test_sysctl_data test_data = { .int_0003[2] = 2, .int_0003[3] = 3, + .boot_int = 0, + .uint_0001 = 314, .string_0001 = "(none)", @@ -92,6 +96,15 @@ static struct ctl_table test_table[] = { .proc_handler = proc_dointvec, }, { + .procname = "boot_int", + .data = &test_data.boot_int, + .maxlen = sizeof(test_data.boot_int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { .procname = "uint_0001", .data = &test_data.uint_0001, .maxlen = sizeof(unsigned int), @@ -149,7 +162,7 @@ static int __init test_sysctl_init(void) } return 0; } -late_initcall(test_sysctl_init); +module_init(test_sysctl_init); static void __exit test_sysctl_exit(void) { diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 8bbefcaddfe8..ddc9685702b1 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -91,12 +91,8 @@ static int random_size_align_alloc_test(void) */ size = ((rnd % 10) + 1) * PAGE_SIZE; - ptr = __vmalloc_node_range(size, align, - VMALLOC_START, VMALLOC_END, - GFP_KERNEL | __GFP_ZERO, - PAGE_KERNEL, - 0, 0, __builtin_return_address(0)); - + ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0, + __builtin_return_address(0)); if (!ptr) return -1; @@ -118,12 +114,8 @@ static int align_shift_alloc_test(void) for (i = 0; i < BITS_PER_LONG; i++) { align = ((unsigned long) 1) << i; - ptr = __vmalloc_node_range(PAGE_SIZE, align, - VMALLOC_START, VMALLOC_END, - 
GFP_KERNEL | __GFP_ZERO, - PAGE_KERNEL, - 0, 0, __builtin_return_address(0)); - + ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0, + __builtin_return_address(0)); if (!ptr) return -1; @@ -139,13 +131,9 @@ static int fix_align_alloc_test(void) int i; for (i = 0; i < test_loop_count; i++) { - ptr = __vmalloc_node_range(5 * PAGE_SIZE, - THREAD_ALIGN << 1, - VMALLOC_START, VMALLOC_END, - GFP_KERNEL | __GFP_ZERO, - PAGE_KERNEL, - 0, 0, __builtin_return_address(0)); - + ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1, + GFP_KERNEL | __GFP_ZERO, 0, + __builtin_return_address(0)); if (!ptr) return -1; diff --git a/lib/ubsan.c b/lib/ubsan.c index f8c0ccf35f29..cb9af3f6b77e 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -189,7 +189,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, ubsan_epilogue(); } -void __ubsan_handle_add_overflow(struct overflow_data *data, +void __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs) { @@ -197,23 +197,23 @@ void __ubsan_handle_add_overflow(struct overflow_data *data, } EXPORT_SYMBOL(__ubsan_handle_add_overflow); -void __ubsan_handle_sub_overflow(struct overflow_data *data, +void __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '-'); } EXPORT_SYMBOL(__ubsan_handle_sub_overflow); -void __ubsan_handle_mul_overflow(struct overflow_data *data, +void __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '*'); } EXPORT_SYMBOL(__ubsan_handle_mul_overflow); -void __ubsan_handle_negate_overflow(struct overflow_data *data, - void *old_val) +void __ubsan_handle_negate_overflow(void *_data, void *old_val) { + struct overflow_data *data = _data; char old_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) @@ -231,9 +231,9 @@ void __ubsan_handle_negate_overflow(struct overflow_data *data, EXPORT_SYMBOL(__ubsan_handle_negate_overflow); -void __ubsan_handle_divrem_overflow(struct overflow_data *data, - void *lhs, void *rhs) +void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs) { + struct overflow_data *data = _data; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) @@ -326,10 +326,9 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, } EXPORT_SYMBOL(__ubsan_handle_type_mismatch); -void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, - void *ptr) +void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr) { - + struct type_mismatch_data_v1 *data = _data; struct type_mismatch_data_common common_data = { .location = &data->location, .type = data->type, @@ -341,8 +340,9 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, } EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); -void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) +void __ubsan_handle_out_of_bounds(void *_data, void *index) { + struct out_of_bounds_data *data = _data; char index_str[VALUE_LENGTH]; if (suppress_report(&data->location)) @@ -357,9 +357,9 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) } EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); -void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, - void *lhs, void *rhs) +void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs) { + struct shift_out_of_bounds_data *data = _data; struct type_descriptor *rhs_type = data->rhs_type; struct type_descriptor *lhs_type = data->lhs_type; char rhs_str[VALUE_LENGTH]; @@ -399,8 +399,9 @@ 
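For the test_vmalloc.c hunks above: dropping the explicit range, protection and flag arguments relies on the __vmalloc_node() helper supplying exactly those defaults. A rough sketch of that assumption (not code from this patch):

/*
 * Assumed equivalence behind the test_vmalloc.c simplification: the helper
 * wraps the range variant with the default vmalloc window and PAGE_KERNEL,
 * roughly:
 */
void *__vmalloc_node(unsigned long size, unsigned long align,
		     gfp_t gfp_mask, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				    gfp_mask, PAGE_KERNEL, 0, node, caller);
}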
out: EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); -void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) +void __ubsan_handle_builtin_unreachable(void *_data) { + struct unreachable_data *data = _data; ubsan_prologue(&data->location, "unreachable"); pr_err("calling __builtin_unreachable()\n"); ubsan_epilogue(); @@ -408,9 +409,9 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) } EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); -void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, - void *val) +void __ubsan_handle_load_invalid_value(void *_data, void *val) { + struct invalid_value_data *data = _data; char val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) diff --git a/lib/usercopy.c b/lib/usercopy.c index 4bb1c5e7a3eb..b26509f112f9 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -59,7 +59,7 @@ int check_zeroed_user(const void __user *from, size_t size) from -= align; size += align; - if (!user_access_begin(from, size)) + if (!user_read_access_begin(from, size)) return -EFAULT; unsafe_get_user(val, (unsigned long __user *) from, err_fault); @@ -80,10 +80,10 @@ int check_zeroed_user(const void __user *from, size_t size) val &= aligned_byte_mask(size); done: - user_access_end(); + user_read_access_end(); return (val == 0); err_fault: - user_access_end(); + user_read_access_end(); return -EFAULT; } EXPORT_SYMBOL(check_zeroed_user); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 7c488a1ce318..259e55895933 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -34,6 +34,7 @@ #include <linux/dcache.h> #include <linux/cred.h> #include <linux/rtc.h> +#include <linux/time.h> #include <linux/uuid.h> #include <linux/of.h> #include <net/addrconf.h> @@ -58,7 +59,7 @@ * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use * - * This function is obsolete. Please use kstrtoull instead. + * This function has caveats. Please use kstrtoull instead. */ unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) { @@ -83,7 +84,7 @@ EXPORT_SYMBOL(simple_strtoull); * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use * - * This function is obsolete. Please use kstrtoul instead. + * This function has caveats. Please use kstrtoul instead. */ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) { @@ -97,7 +98,7 @@ EXPORT_SYMBOL(simple_strtoul); * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use * - * This function is obsolete. Please use kstrtol instead. + * This function has caveats. Please use kstrtol instead. */ long simple_strtol(const char *cp, char **endp, unsigned int base) { @@ -114,7 +115,7 @@ EXPORT_SYMBOL(simple_strtol); * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use * - * This function is obsolete. Please use kstrtoll instead. + * This function has caveats. Please use kstrtoll instead. */ long long simple_strtoll(const char *cp, char **endp, unsigned int base) { @@ -794,6 +795,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr, unsigned long hashval; int ret; + /* + * Print the real pointer value for NULL and error pointers, + * as they are not actual addresses. + */ + if (IS_ERR_OR_NULL(ptr)) + return pointer_string(buf, end, ptr, spec); + /* When debugging early boot use non-cryptographically secure hash. 
*/ if (unlikely(debug_boot_weak_hash)) { hashval = hash_long((unsigned long)ptr, 32); @@ -1820,14 +1828,39 @@ char *rtc_str(char *buf, char *end, const struct rtc_time *tm, } static noinline_for_stack +char *time64_str(char *buf, char *end, const time64_t time, + struct printf_spec spec, const char *fmt) +{ + struct rtc_time rtc_time; + struct tm tm; + + time64_to_tm(time, 0, &tm); + + rtc_time.tm_sec = tm.tm_sec; + rtc_time.tm_min = tm.tm_min; + rtc_time.tm_hour = tm.tm_hour; + rtc_time.tm_mday = tm.tm_mday; + rtc_time.tm_mon = tm.tm_mon; + rtc_time.tm_year = tm.tm_year; + rtc_time.tm_wday = tm.tm_wday; + rtc_time.tm_yday = tm.tm_yday; + + rtc_time.tm_isdst = 0; + + return rtc_str(buf, end, &rtc_time, spec, fmt); +} + +static noinline_for_stack char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec, const char *fmt) { switch (fmt[1]) { case 'R': return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt); + case 'T': + return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt); default: - return error_string(buf, end, "(%ptR?)", spec); + return error_string(buf, end, "(%pt?)", spec); } } @@ -2143,8 +2176,9 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode, * - 'd[234]' For a dentry name (optionally 2-4 last components) * - 'D[234]' Same as 'd' but for a struct file * - 'g' For block_device name (gendisk + partition number) - * - 't[R][dt][r]' For time and date as represented: + * - 't[RT][dt][r]' For time and date as represented by: * R struct rtc_time + * T time64_t * - 'C' For a clock, it prints the name (Common Clock Framework) or address * (legacy clock framework) of the clock * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address @@ -2168,6 +2202,10 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode, * f full name * P node name, including a possible unit address * - 'x' For printing the address. Equivalent to "%lx". + * - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of + * bpf_trace_printk() where [ku] prefix specifies either kernel (k) + * or user (u) memory to probe, and: + * s a string, equivalent to "%s" on direct vsnprintf() use * * ** When making changes please also update: * Documentation/core-api/printk-formats.rst @@ -2251,6 +2289,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, if (!IS_ERR(ptr)) break; return err_ptr(buf, end, ptr, spec); + case 'u': + case 'k': + switch (fmt[1]) { + case 's': + return string(buf, end, ptr, spec); + default: + return error_string(buf, end, "(einval)", spec); + } } /* default is to _not_ leak addresses, hash before printing */ diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 2c13ecc5bb2c..ed1f3df27260 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c @@ -10,17 +10,6 @@ #ifndef ASMINF -/* Allow machine dependent optimization for post-increment or pre-increment. 
- Based on testing to date, - Pre-increment preferred for: - - PowerPC G3 (Adler) - - MIPS R5000 (Randers-Pehrson) - Post-increment preferred for: - - none - No measurable difference: - - Pentium III (Anderson) - - M68060 (Nikl) - */ union uu { unsigned short us; unsigned char b[2]; @@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p) return mm.us; } -#ifdef POSTINC -# define OFF 0 -# define PUP(a) *(a)++ -# define UP_UNALIGNED(a) get_unaligned16((a)++) -#else -# define OFF 1 -# define PUP(a) *++(a) -# define UP_UNALIGNED(a) get_unaligned16(++(a)) -#endif - /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is @@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start) /* copy state to local variables */ state = (struct inflate_state *)strm->state; - in = strm->next_in - OFF; + in = strm->next_in; last = in + (strm->avail_in - 5); - out = strm->next_out - OFF; + out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT @@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start) input data or output space */ do { if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = lcode[hold & lmask]; @@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; op = (unsigned)(this.op); if (op == 0) { /* literal */ - PUP(out) = (unsigned char)(this.val); + *out++ = (unsigned char)(this.val); } else if (op & 16) { /* length base */ len = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); @@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; } if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = dcode[hold & dmask]; @@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start) dist = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } } @@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start) state->mode = BAD; break; } - from = window - OFF; + from = window; if (write == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from end of window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); - from = window - OFF; + from = window; if (write < len) { /* some from start of window */ op = write; len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from window */ len -= op; do { - PUP(out) = 
PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; len -= 3; } if (len) { - PUP(out) = PUP(from); + *out++ = *from++; if (len > 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else { @@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start) from = out - dist; /* copy direct from output */ /* minimum length is three */ /* Align out addr */ - if (!((long)(out - 1 + OFF) & 1)) { - PUP(out) = PUP(from); + if (!((long)(out - 1) & 1)) { + *out++ = *from++; len--; } - sout = (unsigned short *)(out - OFF); + sout = (unsigned short *)(out); if (dist > 2) { unsigned short *sfrom; - sfrom = (unsigned short *)(from - OFF); + sfrom = (unsigned short *)(from); loops = len >> 1; do #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - PUP(sout) = PUP(sfrom); + *sout++ = *sfrom++; #else - PUP(sout) = UP_UNALIGNED(sfrom); + *sout++ = get_unaligned16(sfrom++); #endif while (--loops); - out = (unsigned char *)sout + OFF; - from = (unsigned char *)sfrom + OFF; + out = (unsigned char *)sout; + from = (unsigned char *)sfrom; } else { /* dist == 1 or dist == 2 */ unsigned short pat16; - pat16 = *(sout-1+OFF); + pat16 = *(sout-1); if (dist == 1) { union uu mm; /* copy one char pattern to both bytes */ @@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start) } loops = len >> 1; do - PUP(sout) = pat16; + *sout++ = pat16; while (--loops); - out = (unsigned char *)sout + OFF; + out = (unsigned char *)sout; } if (len & 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else if ((op & 64) == 0) { /* 2nd level distance code */ @@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start) hold &= (1U << bits) - 1; /* update state and return */ - strm->next_in = in + OFF; - strm->next_out = out + OFF; + strm->next_in = in; + strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); |
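The inffast.c hunks above replace the OFF bias and the PUP()/UP_UNALIGNED() pre-increment macros with plain post-increment pointer walks. A minimal standalone sketch of the equivalence being relied on (illustrative only, not taken from the patch):

#include <stddef.h>

/*
 * Old idiom: bias the pointers by one (the former OFF) so that the
 * pre-increment form *++(p), the old PUP(p), yields successive bytes.
 * The bias briefly forms a pointer just before each buffer.
 */
static void copy_preinc(unsigned char *dst, const unsigned char *src, size_t len)
{
	dst -= 1;			/* like "out = strm->next_out - OFF" */
	src -= 1;			/* like "in = strm->next_in - OFF" */
	while (len--)
		*++dst = *++src;	/* old PUP(out) = PUP(in) */
}

/* New idiom: no bias, plain post-increment, as inflate_fast() now does. */
static void copy_postinc(unsigned char *dst, const unsigned char *src, size_t len)
{
	while (len--)
		*dst++ = *src++;
}

Both loops copy the same bytes; the biased form existed only to feed the pre-increment operator, which is what the conversion removes.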