Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                    |  12
-rw-r--r--  lib/Kconfig.debug              |  96
-rw-r--r--  lib/Kconfig.kasan              |  14
-rw-r--r--  lib/Kconfig.kfence             |  82
-rw-r--r--  lib/Kconfig.ubsan              |  17
-rw-r--r--  lib/Makefile                   |   3
-rw-r--r--  lib/asn1_encoder.c             | 454
-rw-r--r--  lib/bug.c                      |   3
-rw-r--r--  lib/cmdline.c                  |   7
-rw-r--r--  lib/crypto/chacha.c            |   4
-rw-r--r--  lib/crypto/poly1305-donna32.c  |   3
-rw-r--r--  lib/crypto/poly1305-donna64.c  |   3
-rw-r--r--  lib/crypto/poly1305.c          |   3
-rw-r--r--  lib/devres.c                   |  22
-rw-r--r--  lib/earlycpio.c                |   4
-rw-r--r--  lib/extable.c                  |   5
-rw-r--r--  lib/genalloc.c                 |   3
-rw-r--r--  lib/iov_iter.c                 | 332
-rw-r--r--  lib/kobject_uevent.c           |   9
-rw-r--r--  lib/list_sort.c                |  17
-rw-r--r--  lib/logic_pio.c                |   3
-rw-r--r--  lib/lru_cache.c                |   3
-rw-r--r--  lib/math/div64.c               |   1
-rw-r--r--  lib/oid_registry.c             |  24
-rw-r--r--  lib/parman.c                   |   4
-rw-r--r--  lib/parser.c                   |  44
-rw-r--r--  lib/radix-tree.c               |  11
-rw-r--r--  lib/stackdepot.c               |  37
-rw-r--r--  lib/test_kasan.c               | 540
-rw-r--r--  lib/test_kasan_module.c        |   7
-rw-r--r--  lib/test_list_sort.c           |   3
-rw-r--r--  lib/test_ubsan.c               |  49
-rw-r--r--  lib/test_xarray.c              |  26
-rw-r--r--  lib/ubsan.c                    |  68
-rw-r--r--  lib/xarray.c                   |  11
35 files changed, 1528 insertions, 396 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 46806332a8cc..ac3b30697b2b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -651,6 +651,15 @@ config STACKDEPOT
bool
select STACKTRACE
+config STACK_HASH_ORDER
+ int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
+ range 12 20
+ default 20
+ depends on STACKDEPOT
+ help
+ Select the hash size as a power of 2 for the stackdepot hash table.
+ Choose a lower value to reduce the memory impact.
+
config SBITMAP
bool
@@ -692,3 +701,6 @@ config GENERIC_LIB_DEVMEM_IS_ALLOWED
config PLDMFW
bool
default n
+
+config ASN1_ENCODER
+ tristate
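
For orientation only (this sketch is not part of the patch, and the bucket layout is an assumption about lib/stackdepot.c): the STACK_HASH_ORDER knob added above is simply the log2 of the stackdepot hash-table bucket count, so each step in the 12..20 range doubles the table.

    /* sketch: STACK_HASH_ORDER selects a power-of-two bucket count */
    #define STACK_HASH_SIZE  (1UL << CONFIG_STACK_HASH_ORDER)  /* 4096 .. 1048576 buckets */
    #define STACK_HASH_MASK  (STACK_HASH_SIZE - 1)

    static struct stack_record *stack_table[STACK_HASH_SIZE];  /* assumed bucket layout */

    static inline struct stack_record **stack_bucket(u32 hash)
    {
            /* masking works because the table size is a power of two */
            return &stack_table[hash & STACK_HASH_MASK];
    }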
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5ea0c1773b0a..2c7f46b366f1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -256,14 +256,51 @@ config DEBUG_INFO_SPLIT
to know about the .dwo files and include them.
Incompatible with older versions of ccache.
+choice
+ prompt "DWARF version"
+ help
+ Which version of DWARF debug info to emit.
+
+config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+ bool "Rely on the toolchain's implicit default DWARF version"
+ help
+ The implicit default version of DWARF debug info produced by a
+ toolchain changes over time.
+
+ This can break consumers of the debug info that haven't upgraded to
+ support newer revisions, and prevent testing newer versions, but
+ those should be less common scenarios.
+
+ If unsure, say Y.
+
config DEBUG_INFO_DWARF4
- bool "Generate dwarf4 debuginfo"
- depends on $(cc-option,-gdwarf-4)
+ bool "Generate DWARF Version 4 debuginfo"
+ help
+ Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
+
+ If you have consumers of DWARF debug info that are not ready for
+ newer revisions of DWARF, you may wish to choose this or have your
+ config select this.
+
+config DEBUG_INFO_DWARF5
+ bool "Generate DWARF Version 5 debuginfo"
+ depends on GCC_VERSION >= 50000 || CC_IS_CLANG
+ depends on CC_IS_GCC || $(success,$(srctree)/scripts/test_dwarf5_support.sh $(CC) $(CLANG_FLAGS))
+ depends on !DEBUG_INFO_BTF
help
- Generate dwarf4 debug info. This requires recent versions
- of gcc and gdb. It makes the debug information larger.
- But it significantly improves the success of resolving
- variables in gdb on optimized code.
+ Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
+ 5.0+ accepts the -gdwarf-5 flag but only had partial support for some
+ draft features until 7.0), and gdb 8.0+.
+
+ Changes to the structure of debug info in Version 5 allow for around
+ 15-18% savings in resulting image and debug info section sizes as
+ compared to DWARF Version 4. DWARF Version 5 standardizes previous
+ extensions such as accelerators for symbol indexing and the format
+ for fission (.dwo/.dwp) files. Users may not want to select this
+ config if they rely on tooling that has not yet been updated to
+ support DWARF Version 5.
+
+endchoice # "DWARF version"
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
@@ -901,6 +938,7 @@ config DEBUG_STACKOVERFLOW
If in doubt, say "N".
source "lib/Kconfig.kasan"
+source "lib/Kconfig.kfence"
endmenu # "Memory Debugging"
@@ -1325,13 +1363,53 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
+ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
select KALLSYMS
select KALLSYMS_ALL
config LOCKDEP_SMALL
bool
+config LOCKDEP_BITS
+ int "Bitsize for MAX_LOCKDEP_ENTRIES"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 15
+ help
+ Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
+
+config LOCKDEP_CHAINS_BITS
+ int "Bitsize for MAX_LOCKDEP_CHAINS"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 16
+ help
+ Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
+
+config LOCKDEP_STACK_TRACE_BITS
+ int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 19
+ help
+ Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
+
+config LOCKDEP_STACK_TRACE_HASH_BITS
+ int "Bitsize for STACK_TRACE_HASH_SIZE"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 14
+ help
+ Try increasing this value if you need large MAX_STACK_TRACE_ENTRIES.
+
+config LOCKDEP_CIRCULAR_QUEUE_BITS
+ int "Bitsize for elements in circular_queue struct"
+ depends on LOCKDEP
+ range 10 30
+ default 12
+ help
+ Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
@@ -1627,7 +1705,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -1880,7 +1958,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
help
Provide stacktrace filter for fault-injection capabilities
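
For orientation only (illustrative, not taken from the patch): the new LOCKDEP_*_BITS knobs presumably expand into the lockdep table limits as powers of two, so raising a value by one doubles the corresponding table, roughly along these lines.

    /* assumed expansion of the new knobs into lockdep limits */
    #define MAX_LOCKDEP_ENTRIES      (1UL << CONFIG_LOCKDEP_BITS)              /* default 1 << 15 */
    #define MAX_LOCKDEP_CHAINS       (1UL << CONFIG_LOCKDEP_CHAINS_BITS)       /* default 1 << 16 */
    #define MAX_STACK_TRACE_ENTRIES  (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)  /* default 1 << 19 */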
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index f5fa4ba126bf..cffc2ebbf185 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -138,9 +138,10 @@ config KASAN_INLINE
endchoice
-config KASAN_STACK_ENABLE
+config KASAN_STACK
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
+ default y if CC_IS_GCC
help
The LLVM stack address sanitizer has a known problem that
causes excessive stack usage in a lot of functions, see
@@ -154,11 +155,6 @@ config KASAN_STACK_ENABLE
CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
to use and enabled by default.
-config KASAN_STACK
- int
- default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
- default 0
-
config KASAN_SW_TAGS_IDENTIFY
bool "Enable memory corruption identification"
depends on KASAN_SW_TAGS
@@ -190,11 +186,11 @@ config KASAN_KUNIT_TEST
kernel debugging features like KASAN.
For more information on KUnit and unit tests in general, please refer
- to the KUnit documentation in Documentation/dev-tools/kunit
+ to the KUnit documentation in Documentation/dev-tools/kunit.
-config TEST_KASAN_MODULE
+config KASAN_MODULE_TEST
tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
- depends on m && KASAN
+ depends on m && KASAN && !KASAN_HW_TAGS
help
This is a part of the KASAN test suite that is incompatible with
KUnit. Currently includes tests that do bad copy_from/to_user
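
Purely to illustrate what the stack instrumentation behind KASAN_STACK is for (this snippet is not part of the patch): the compiler places redzones around on-stack variables, so an out-of-bounds stack access such as the following is reported at run time.

    /* illustration only: a stack out-of-bounds bug KASAN_STACK is meant to flag */
    static void stack_oob_example(void)
    {
            char buf[8];
            volatile int i = 8;     /* defeat compile-time bounds checking */

            buf[i] = 'x';           /* one past the end: reported when instrumentation is on */
    }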
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
new file mode 100644
index 000000000000..78f50ccb3b45
--- /dev/null
+++ b/lib/Kconfig.kfence
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KFENCE
+ bool
+
+menuconfig KFENCE
+ bool "KFENCE: low-overhead sampling-based memory safety error detector"
+ depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
+ select STACKTRACE
+ help
+ KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
+ access, use-after-free, and invalid-free errors. KFENCE is designed
+ to have negligible cost to permit enabling it in production
+ environments.
+
+ See <file:Documentation/dev-tools/kfence.rst> for more details.
+
+ Note that, KFENCE is not a substitute for explicit testing with tools
+ such as KASAN. KFENCE can detect a subset of bugs that KASAN can
+ detect, albeit at very different performance profiles. If you can
+ afford to use KASAN, continue using KASAN, for example in test
+ environments. If your kernel targets production use, and cannot
+ enable KASAN due to its cost, consider using KFENCE.
+
+if KFENCE
+
+config KFENCE_STATIC_KEYS
+ bool "Use static keys to set up allocations"
+ default y
+ depends on JUMP_LABEL # To ensure performance, require jump labels
+ help
+ Use static keys (static branches) to set up KFENCE allocations. Using
+ static keys is normally recommended, because it avoids a dynamic
+ branch in the allocator's fast path. However, with very low sample
+ intervals, or on systems that do not support jump labels, a dynamic
+ branch may still be an acceptable performance trade-off.
+
+config KFENCE_SAMPLE_INTERVAL
+ int "Default sample interval in milliseconds"
+ default 100
+ help
+ The KFENCE sample interval determines the frequency with which heap
+ allocations will be guarded by KFENCE. May be overridden via boot
+ parameter "kfence.sample_interval".
+
+ Set this to 0 to disable KFENCE by default, in which case only
+ setting "kfence.sample_interval" to a non-zero value enables KFENCE.
+
+config KFENCE_NUM_OBJECTS
+ int "Number of guarded objects available"
+ range 1 65535
+ default 255
+ help
+ The number of guarded objects available. For each KFENCE object, 2
+ pages are required; with one containing the object and two adjacent
+ ones used as guard pages.
+
+config KFENCE_STRESS_TEST_FAULTS
+ int "Stress testing of fault handling and error reporting" if EXPERT
+ default 0
+ help
+ The inverse probability with which to randomly protect KFENCE object
+ pages, resulting in spurious use-after-frees. The main purpose of
+ this option is to stress test KFENCE with concurrent error reports
+ and allocations/frees. A value of 0 disables stress testing logic.
+
+ Only for KFENCE testing; set to 0 if you are not a KFENCE developer.
+
+config KFENCE_KUNIT_TEST
+ tristate "KFENCE integration test suite" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ help
+ Test suite for KFENCE, testing various error detection scenarios with
+ various allocation types, and checking that reports are correctly
+ output to console.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+endif # KFENCE
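
As an illustration of the bug class KFENCE targets (not part of the patch): a heap use-after-free like the one below is trapped only if that particular allocation happened to be sampled, which is why the sample interval, not the workload, bounds the overhead.

    /* illustration only: a use-after-free a sampled KFENCE allocation would trap */
    static void uaf_example(void)
    {
            char *p = kmalloc(32, GFP_KERNEL);

            kfree(p);
            *p = 'x';       /* write after free: a guarded object faults here */
    }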
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 3a0b1c930733..e5372a13511d 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -112,23 +112,6 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
-config UBSAN_SIGNED_OVERFLOW
- bool "Perform checking for signed arithmetic overflow"
- default UBSAN
- depends on $(cc-option,-fsanitize=signed-integer-overflow)
- help
- This option enables -fsanitize=signed-integer-overflow which checks
- for overflow of any arithmetic operations with signed integers.
-
-config UBSAN_UNSIGNED_OVERFLOW
- bool "Perform checking for unsigned arithmetic overflow"
- depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
- depends on !X86_32 # avoid excessive stack usage on x86-32/clang
- help
- This option enables -fsanitize=unsigned-integer-overflow which checks
- for overflow of any arithmetic operations with unsigned integers. This
- currently causes x86 to fail to boot.
-
config UBSAN_OBJECT_SIZE
bool "Perform checking for accesses beyond the end of objects"
default UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index fb7d946bb8c3..e11cfc18b6c0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -66,7 +66,7 @@ obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin
CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
-obj-$(CONFIG_TEST_KASAN_MODULE) += test_kasan_module.o
+obj-$(CONFIG_KASAN_MODULE_TEST) += test_kasan_module.o
CFLAGS_test_kasan_module.o += -fno-builtin
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
@@ -280,6 +280,7 @@ obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
obj-$(CONFIG_ASN1) += asn1_decoder.o
+obj-$(CONFIG_ASN1_ENCODER) += asn1_encoder.o
obj-$(CONFIG_FONT_SUPPORT) += fonts/
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
new file mode 100644
index 000000000000..41e71aae3ef6
--- /dev/null
+++ b/lib/asn1_encoder.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Simple encoder primitives for ASN.1 BER/DER/CER
+ *
+ * Copyright (C) 2019 James.Bottomley@HansenPartnership.com
+ */
+
+#include <linux/asn1_encoder.h>
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+/**
+ * asn1_encode_integer() - encode positive integer to ASN.1
+ * @data: pointer to the pointer to the data
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @integer: integer to be encoded
+ *
+ * This is a simplified encoder: it only currently does
+ * positive integers, but it should be simple enough to add the
+ * negative case if a use comes along.
+ */
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+ s64 integer)
+{
+ int data_len = end_data - data;
+ unsigned char *d = &data[2];
+ bool found = false;
+ int i;
+
+ if (WARN(integer < 0,
+ "BUG: integer encode only supports positive integers"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ /* need at least 3 bytes for tag, length and integer encoding */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+	/* remaining length once we're at d (the start of the integer encoding) */
+ data_len -= 2;
+
+ data[0] = _tag(UNIV, PRIM, INT);
+ if (integer == 0) {
+ *d++ = 0;
+ goto out;
+ }
+
+ for (i = sizeof(integer); i > 0 ; i--) {
+ int byte = integer >> (8 * (i - 1));
+
+ if (!found && byte == 0)
+ continue;
+
+ /*
+ * for a positive number the first byte must have bit
+ * 7 clear in two's complement (otherwise it's a
+ * negative number) so prepend a leading zero if
+ * that's not the case
+ */
+ if (!found && (byte & 0x80)) {
+ /*
+ * no check needed here, we already know we
+ * have len >= 1
+ */
+ *d++ = 0;
+ data_len--;
+ }
+
+ found = true;
+ if (data_len == 0)
+ return ERR_PTR(-EINVAL);
+
+ *d++ = byte;
+ data_len--;
+ }
+
+ out:
+ data[1] = d - data - 2;
+
+ return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_integer);
+
+/* calculate the base 128 digit values setting the top bit of the first octet */
+static int asn1_encode_oid_digit(unsigned char **_data, int *data_len, u32 oid)
+{
+ unsigned char *data = *_data;
+ int start = 7 + 7 + 7 + 7;
+ int ret = 0;
+
+ if (*data_len < 1)
+ return -EINVAL;
+
+ /* quick case */
+ if (oid == 0) {
+ *data++ = 0x80;
+ (*data_len)--;
+ goto out;
+ }
+
+ while (oid >> start == 0)
+ start -= 7;
+
+ while (start > 0 && *data_len > 0) {
+ u8 byte;
+
+ byte = oid >> start;
+ oid = oid - (byte << start);
+ start -= 7;
+ byte |= 0x80;
+ *data++ = byte;
+ (*data_len)--;
+ }
+
+ if (*data_len > 0) {
+ *data++ = oid;
+ (*data_len)--;
+ } else {
+ ret = -EINVAL;
+ }
+
+ out:
+ *_data = data;
+ return ret;
+}
+
+/**
+ * asn1_encode_oid() - encode an oid to ASN.1
+ * @data: position to begin encoding at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @oid: array of oids
+ * @oid_len: length of oid array
+ *
+ * this encodes an OID into ASN.1 when presented as an array of OID values
+ */
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+ u32 oid[], int oid_len)
+{
+ int data_len = end_data - data;
+ unsigned char *d = data + 2;
+ int i, ret;
+
+ if (WARN(oid_len < 2, "OID must have at least two elements"))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN(oid_len > 32, "OID is too large"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+
+ /* need at least 3 bytes for tag, length and OID encoding */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+ data[0] = _tag(UNIV, PRIM, OID);
+ *d++ = oid[0] * 40 + oid[1];
+
+ data_len -= 3;
+
+ ret = 0;
+
+ for (i = 2; i < oid_len; i++) {
+ ret = asn1_encode_oid_digit(&d, &data_len, oid[i]);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ data[1] = d - data - 2;
+
+ return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_oid);
+
+/**
+ * asn1_encode_length() - encode a length to follow an ASN.1 tag
+ * @data: pointer to encode at
+ * @data_len: pointer to remaining length (adjusted by routine)
+ * @len: length to encode
+ *
+ * This routine can encode lengths up to 65535 using the ASN.1 rules.
+ * It will accept a negative length and place a zero length tag
+ * instead (to keep the ASN.1 valid). This convention allows other
+ * encoder primitives to accept negative lengths as signalling the
+ * sequence will be re-encoded when the length is known.
+ */
+static int asn1_encode_length(unsigned char **data, int *data_len, int len)
+{
+ if (*data_len < 1)
+ return -EINVAL;
+
+ if (len < 0) {
+ *((*data)++) = 0;
+ (*data_len)--;
+ return 0;
+ }
+
+ if (len <= 0x7f) {
+ *((*data)++) = len;
+ (*data_len)--;
+ return 0;
+ }
+
+ if (*data_len < 2)
+ return -EINVAL;
+
+ if (len <= 0xff) {
+ *((*data)++) = 0x81;
+ *((*data)++) = len & 0xff;
+ *data_len -= 2;
+ return 0;
+ }
+
+ if (*data_len < 3)
+ return -EINVAL;
+
+ if (len <= 0xffff) {
+ *((*data)++) = 0x82;
+ *((*data)++) = (len >> 8) & 0xff;
+ *((*data)++) = len & 0xff;
+ *data_len -= 3;
+ return 0;
+ }
+
+ if (WARN(len > 0xffffff, "ASN.1 length can't be > 0xffffff"))
+ return -EINVAL;
+
+ if (*data_len < 4)
+ return -EINVAL;
+ *((*data)++) = 0x83;
+ *((*data)++) = (len >> 16) & 0xff;
+ *((*data)++) = (len >> 8) & 0xff;
+ *((*data)++) = len & 0xff;
+ *data_len -= 4;
+
+ return 0;
+}
+
+/**
+ * asn1_encode_tag() - add a tag for optional or explicit value
+ * @data: pointer to place tag at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @tag: tag to be placed
+ * @string: the data to be tagged
+ * @len: the length of the data to be tagged
+ *
+ * Note this currently only handles short form tags < 31.
+ *
+ * Standard usage is to pass in a @tag, @string and @length and the
+ * @string will be ASN.1 encoded with @tag and placed into @data. If
+ * the encoding would put data past @end_data then an error is
+ * returned, otherwise a pointer to a position one beyond the encoding
+ * is returned.
+ *
+ * To encode in place pass a NULL @string and -1 for @len and the
+ * maximum allowable beginning and end of the data; all this will do
+ * is add the current maximum length and return a pointer to the
+ * place where the tag contents should be placed. The
+ * data should be copied in by the calling routine which should then
+ * repeat the prior statement but now with the known length. In order
+ * to avoid having to keep both before and after pointers, the repeat
+ * expects to be called with @data pointing to where the first encode
+ * returned it and still NULL for @string but the real length in @len.
+ */
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+ u32 tag, const unsigned char *string, int len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (WARN(tag > 30, "ASN.1 tag can't be > 30"))
+ return ERR_PTR(-EINVAL);
+
+ if (!string && WARN(len > 127,
+ "BUG: recode tag is too big (>127)"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ if (!string && len > 0) {
+ /*
+ * we're recoding, so move back to the start of the
+ * tag and install a dummy length because the real
+ * data_len should be NULL
+ */
+ data -= 2;
+ data_len = 2;
+ }
+
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tagn(CONT, CONS, tag);
+ data_len--;
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!string)
+ return data;
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, string, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_tag);
+
+/**
+ * asn1_encode_octet_string() - encode an ASN.1 OCTET STRING
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @string: string to be encoded
+ * @len: length of string
+ *
+ * Note ASN.1 octet strings may contain zeros, so the length is obligatory.
+ */
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+ const unsigned char *end_data,
+ const unsigned char *string, u32 len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (IS_ERR(data))
+ return data;
+
+ /* need minimum of 2 bytes for tag and length of zero length string */
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, PRIM, OTS);
+ data_len--;
+
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, string, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_octet_string);
+
+/**
+ * asn1_encode_sequence() - wrap a byte stream in an ASN.1 SEQUENCE
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @seq: data to be encoded as a sequence
+ * @len: length of the data to be encoded as a sequence
+ *
+ * Fill in a sequence. To encode in place, pass NULL for @seq and -1
+ * for @len; then call again once the length is known (still with NULL
+ * for @seq). In order to avoid having to keep both before and after
+ * pointers, the repeat expects to be called with @data pointing to
+ * where the first encode placed it.
+ */
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+ const unsigned char *seq, int len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (!seq && WARN(len > 127,
+ "BUG: recode sequence is too big (>127)"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ if (!seq && len >= 0) {
+ /*
+ * we're recoding, so move back to the start of the
+ * sequence and install a dummy length because the
+ * real length should be NULL
+ */
+ data -= 2;
+ data_len = 2;
+ }
+
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, CONS, SEQ);
+ data_len--;
+
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!seq)
+ return data;
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, seq, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_sequence);
+
+/**
+ * asn1_encode_boolean() - encode a boolean value to ASN.1
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @val: the boolean true/false value
+ */
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+ bool val)
+{
+ int data_len = end_data - data;
+
+ if (IS_ERR(data))
+ return data;
+
+ /* booleans are 3 bytes: tag, length == 1 and value == 0 or 1 */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, PRIM, BOOL);
+ data_len--;
+
+ asn1_encode_length(&data, &data_len, 1);
+
+ if (val)
+ *(data++) = 1;
+ else
+ *(data++) = 0;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_boolean);
+
+MODULE_LICENSE("GPL");
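
A hedged usage sketch (encode_example() and the OID value are hypothetical, not part of the patch) showing how the primitives chain errors through ERR_PTR and how the two-pass "encode in place" pattern from the asn1_encode_sequence() kernel-doc fits together:

    #include <linux/asn1_encoder.h>
    #include <linux/err.h>
    #include <linux/kernel.h>

    /* hypothetical caller: emit SEQUENCE { OID 2.5.4.3, INTEGER 42 } */
    static int encode_example(unsigned char *buf, size_t buflen)
    {
            unsigned char *end = buf + buflen;
            u32 oid[] = { 2, 5, 4, 3 };     /* example OID; content stays <= 127 bytes */
            unsigned char *content, *d;

            /* pass 1: reserve the sequence header with a dummy length */
            content = asn1_encode_sequence(buf, end, NULL, -1);
            d = asn1_encode_oid(content, end, oid, ARRAY_SIZE(oid));
            d = asn1_encode_integer(d, end, 42);
            if (IS_ERR(d))
                    return PTR_ERR(d);      /* errors propagate through the chain */

            /* pass 2: rewrite the header now that the content length is known;
             * cannot fail here since the length is <= 127 and room was checked */
            asn1_encode_sequence(content, end, NULL, d - content);
            return d - buf;
    }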
diff --git a/lib/bug.c b/lib/bug.c
index 7103440c0ee1..8f9d537bfb2a 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -91,8 +91,6 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
char *secstrings;
unsigned int i;
- lockdep_assert_held(&module_mutex);
-
mod->bug_table = NULL;
mod->num_bugs = 0;
@@ -118,7 +116,6 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
void module_bug_cleanup(struct module *mod)
{
- lockdep_assert_held(&module_mutex);
list_del_rcu(&mod->bug_list);
}
diff --git a/lib/cmdline.c b/lib/cmdline.c
index dfd4c4423f9a..5d474c626e24 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -228,7 +228,6 @@ char *next_arg(char *args, char **param, char **val)
{
unsigned int i, equals = 0;
int in_quote = 0, quoted = 0;
- char *next;
if (*args == '"') {
args++;
@@ -266,10 +265,10 @@ char *next_arg(char *args, char **param, char **val)
if (args[i]) {
args[i] = '\0';
- next = args + i + 1;
+ args += i + 1;
} else
- next = args + i;
+ args += i;
/* Chew up trailing spaces. */
- return skip_spaces(next);
+ return skip_spaces(args);
}
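
For context (illustrative fragment, not from the patch): next_arg() tokenises a kernel-parameter style string in place, and the change above merely reuses @args instead of a separate local. The calling pattern looks roughly like this:

    /* sketch: split "foo=1 bar baz=2" into param/value pairs in place */
    char buf[] = "foo=1 bar baz=2";
    char *args = buf, *param, *val;

    while (*args) {
            args = next_arg(args, &param, &val);
            pr_info("param=%s val=%s\n", param, val ?: "(none)");
    }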
diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c
index 4ccbec442469..b748fd3d256e 100644
--- a/lib/crypto/chacha.c
+++ b/lib/crypto/chacha.c
@@ -64,7 +64,7 @@ static void chacha_permute(u32 *x, int nrounds)
}
/**
- * chacha_block - generate one keystream block and increment block counter
+ * chacha_block_generic - generate one keystream block and increment block counter
* @state: input state matrix (16 32-bit words)
* @stream: output keystream block (64 bytes)
* @nrounds: number of rounds (20 or 12; 20 is recommended)
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(chacha_block_generic);
/**
* hchacha_block_generic - abbreviated ChaCha core, for XChaCha
* @state: input state matrix (16 32-bit words)
- * @out: output (8 32-bit words)
+ * @stream: output (8 32-bit words)
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
index 3cc77d94390b..7fb71845cc84 100644
--- a/lib/crypto/poly1305-donna32.c
+++ b/lib/crypto/poly1305-donna32.c
@@ -10,7 +10,8 @@
#include <asm/unaligned.h>
#include <crypto/internal/poly1305.h>
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
index 6ae181bb4345..d34cf4053668 100644
--- a/lib/crypto/poly1305-donna64.c
+++ b/lib/crypto/poly1305-donna64.c
@@ -12,7 +12,8 @@
typedef __uint128_t u128;
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
{
u64 t0, t1;
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
index 9d2d14df0fee..26d87fc3823e 100644
--- a/lib/crypto/poly1305.c
+++ b/lib/crypto/poly1305.c
@@ -12,7 +12,8 @@
#include <linux/module.h>
#include <asm/unaligned.h>
-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE])
{
poly1305_core_setkey(&desc->core_r, key);
desc->s[0] = get_unaligned_le32(key + 16);
diff --git a/lib/devres.c b/lib/devres.c
index 2a4ff5d64288..4679dbb1bf5f 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -10,6 +10,7 @@ enum devm_ioremap_type {
DEVM_IOREMAP = 0,
DEVM_IOREMAP_UC,
DEVM_IOREMAP_WC,
+ DEVM_IOREMAP_NP,
};
void devm_ioremap_release(struct device *dev, void *res)
@@ -42,6 +43,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
case DEVM_IOREMAP_WC:
addr = ioremap_wc(offset, size);
break;
+ case DEVM_IOREMAP_NP:
+ addr = ioremap_np(offset, size);
+ break;
}
if (addr) {
@@ -99,6 +103,21 @@ void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap_wc);
/**
+ * devm_ioremap_np - Managed ioremap_np()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap_np(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP);
+}
+EXPORT_SYMBOL(devm_ioremap_np);
+
+/**
* devm_iounmap - Managed iounmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
@@ -128,6 +147,9 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
return IOMEM_ERR_PTR(-EINVAL);
}
+ if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
+ type = DEVM_IOREMAP_NP;
+
size = resource_size(res);
if (res->name)
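
A hedged driver-side sketch of the new managed non-posted mapping (foo_probe() is hypothetical); note that __devm_ioremap_resource() above also upgrades a plain DEVM_IOREMAP to DEVM_IOREMAP_NP automatically when the resource carries IORESOURCE_MEM_NONPOSTED.

    /* hypothetical probe: map a register block with non-posted semantics */
    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return -ENODEV;

            base = devm_ioremap_np(&pdev->dev, res->start, resource_size(res));
            if (!base)
                    return -ENOMEM;

            /* ... use readl()/writel() on base; unmapped automatically on detach */
            return 0;
    }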
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index e83628882001..7921193f0424 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -40,7 +40,7 @@ enum cpio_fields {
};
/**
- * cpio_data find_cpio_data - Search for files in an uncompressed cpio
+ * find_cpio_data - Search for files in an uncompressed cpio
* @path: The directory to search for, including a slash at the end
* @data: Pointer to the cpio archive or a header inside
* @len: Remaining length of the cpio based on data pointer
@@ -49,7 +49,7 @@ enum cpio_fields {
* matching file itself. It can be used to iterate through the cpio
* to find all files inside of a directory path.
*
- * @return: struct cpio_data containing the address, length and
+ * Return: &struct cpio_data containing the address, length and
* filename (with the directory path cut off) of the found file.
* If you search for a filename and not for files in a directory,
* pass the absolute path of the filename in the cpio and make sure
diff --git a/lib/extable.c b/lib/extable.c
index c3e59caf7ffa..9c9f40bd2b3d 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -21,7 +21,6 @@ static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
}
#endif
-#ifndef ARCH_HAS_SORT_EXTABLE
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex NULL
#else
@@ -88,9 +87,6 @@ void trim_init_extable(struct module *m)
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
-
-#ifndef ARCH_HAS_SEARCH_EXTABLE
static int cmp_ex_search(const void *key, const void *elt)
{
@@ -120,4 +116,3 @@ search_extable(const struct exception_table_entry *base,
return bsearch(&value, base, num,
sizeof(struct exception_table_entry), cmp_ex_search);
}
-#endif
diff --git a/lib/genalloc.c b/lib/genalloc.c
index dab97bb69df6..5dcf9cdcbc46 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -81,7 +81,8 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
* users set the same bit, one user will return remain bits, otherwise
* return 0.
*/
-static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
+static unsigned long
+bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned long size = start + nr;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index d8ca336ff9cd..61228a6c69f8 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -76,7 +76,44 @@
} \
}
-#define iterate_all_kinds(i, n, v, I, B, K) { \
+#define iterate_xarray(i, n, __v, skip, STEP) { \
+ struct page *head = NULL; \
+ size_t wanted = n, seg, offset; \
+ loff_t start = i->xarray_start + skip; \
+ pgoff_t index = start >> PAGE_SHIFT; \
+ int j; \
+ \
+ XA_STATE(xas, i->xarray, index); \
+ \
+ rcu_read_lock(); \
+ xas_for_each(&xas, head, ULONG_MAX) { \
+ if (xas_retry(&xas, head)) \
+ continue; \
+ if (WARN_ON(xa_is_value(head))) \
+ break; \
+ if (WARN_ON(PageHuge(head))) \
+ break; \
+ for (j = (head->index < index) ? index - head->index : 0; \
+ j < thp_nr_pages(head); j++) { \
+ __v.bv_page = head + j; \
+ offset = (i->xarray_start + skip) & ~PAGE_MASK; \
+ seg = PAGE_SIZE - offset; \
+ __v.bv_offset = offset; \
+ __v.bv_len = min(n, seg); \
+ (void)(STEP); \
+ n -= __v.bv_len; \
+ skip += __v.bv_len; \
+ if (n == 0) \
+ break; \
+ } \
+ if (n == 0) \
+ break; \
+ } \
+ rcu_read_unlock(); \
+ n = wanted - n; \
+}
+
+#define iterate_all_kinds(i, n, v, I, B, K, X) { \
if (likely(n)) { \
size_t skip = i->iov_offset; \
if (unlikely(i->type & ITER_BVEC)) { \
@@ -88,6 +125,9 @@
struct kvec v; \
iterate_kvec(i, n, v, kvec, skip, (K)) \
} else if (unlikely(i->type & ITER_DISCARD)) { \
+ } else if (unlikely(i->type & ITER_XARRAY)) { \
+ struct bio_vec v; \
+ iterate_xarray(i, n, v, skip, (X)); \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -96,7 +136,7 @@
} \
}
-#define iterate_and_advance(i, n, v, I, B, K) { \
+#define iterate_and_advance(i, n, v, I, B, K, X) { \
if (unlikely(i->count < n)) \
n = i->count; \
if (i->count) { \
@@ -121,6 +161,9 @@
i->kvec = kvec; \
} else if (unlikely(i->type & ITER_DISCARD)) { \
skip += n; \
+ } else if (unlikely(i->type & ITER_XARRAY)) { \
+ struct bio_vec v; \
+ iterate_xarray(i, n, v, skip, (X)) \
} else { \
const struct iovec *iov; \
struct iovec v; \
@@ -464,20 +507,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
-{
- char *from = kmap_atomic(page);
- memcpy(to, from + offset, len);
- kunmap_atomic(from);
-}
-
-static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
-{
- char *to = kmap_atomic(page);
- memcpy(to + offset, from, len);
- kunmap_atomic(to);
-}
-
static void memzero_page(struct page *page, size_t offset, size_t len)
{
char *addr = kmap_atomic(page);
@@ -636,7 +665,9 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
memcpy_to_page(v.bv_page, v.bv_offset,
(from += v.bv_len) - v.bv_len, v.bv_len),
- memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
+ memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+ memcpy_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len)
)
return bytes;
@@ -752,6 +783,18 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
bytes = curr_addr - s_addr - rem;
return bytes;
}
+ }),
+ ({
+ rem = copy_mc_to_page(v.bv_page, v.bv_offset,
+ (from += v.bv_len) - v.bv_len, v.bv_len);
+ if (rem) {
+ curr_addr = (unsigned long) from;
+ bytes = curr_addr - s_addr - rem;
+ rcu_read_unlock();
+ i->iov_offset += bytes;
+ i->count -= bytes;
+ return bytes;
+ }
})
)
@@ -773,7 +816,9 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
return bytes;
@@ -799,7 +844,9 @@ bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
iov_iter_advance(i, bytes);
@@ -819,7 +866,9 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
return bytes;
@@ -854,7 +903,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len)
+ v.iov_len),
+ memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
return bytes;
@@ -878,7 +929,9 @@ bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
0;}),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
iov_iter_advance(i, bytes);
@@ -915,7 +968,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
{
if (unlikely(!page_copy_sane(page, offset, bytes)))
return 0;
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
+ if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
@@ -938,7 +991,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
WARN_ON(1);
return 0;
}
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
+ if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
void *kaddr = kmap_atomic(page);
size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
@@ -982,7 +1035,8 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
iterate_and_advance(i, bytes, v,
clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
- memset(v.iov_base, 0, v.iov_len)
+ memset(v.iov_base, 0, v.iov_len),
+ memzero_page(v.bv_page, v.bv_offset, v.bv_len)
)
return bytes;
@@ -1006,7 +1060,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+ memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
+ memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len)
)
kunmap_atomic(kaddr);
return bytes;
@@ -1092,11 +1148,17 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
i->count -= size;
return;
}
+ if (unlikely(iov_iter_is_xarray(i))) {
+ size = min(size, i->count);
+ i->iov_offset += size;
+ i->count -= size;
+ return;
+ }
if (iov_iter_is_bvec(i)) {
iov_iter_bvec_advance(i, size);
return;
}
- iterate_and_advance(i, size, v, 0, 0, 0)
+ iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -1140,7 +1202,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
return;
}
unroll -= i->iov_offset;
- if (iov_iter_is_bvec(i)) {
+ if (iov_iter_is_xarray(i)) {
+ BUG(); /* We should never go beyond the start of the specified
+ * range since we might then be straying into pages that
+ * aren't pinned.
+ */
+ } else if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
@@ -1177,9 +1244,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
return i->count; // it is a silly place, anyway
if (i->nr_segs == 1)
return i->count;
- if (unlikely(iov_iter_is_discard(i)))
+ if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
return i->count;
- else if (iov_iter_is_bvec(i))
+ if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
else
return min(i->count, i->iov->iov_len - i->iov_offset);
@@ -1228,6 +1295,31 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
EXPORT_SYMBOL(iov_iter_pipe);
/**
+ * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @xarray: The xarray to access.
+ * @start: The start file position.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator to either draw data out of the pages attached to an
+ * inode or to inject data into those pages. The pages *must* be prevented
+ * from evaporation, either by taking a ref on them or locking them by the
+ * caller.
+ */
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
+ struct xarray *xarray, loff_t start, size_t count)
+{
+ BUG_ON(direction & ~1);
+ i->type = ITER_XARRAY | (direction & (READ | WRITE));
+ i->xarray = xarray;
+ i->xarray_start = start;
+ i->count = count;
+ i->iov_offset = 0;
+}
+EXPORT_SYMBOL(iov_iter_xarray);
+
+/**
* iov_iter_discard - Initialise an I/O iterator that discards data
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
@@ -1257,10 +1349,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
return size | i->iov_offset;
return size;
}
+ if (unlikely(iov_iter_is_xarray(i)))
+ return (i->xarray_start + i->iov_offset) | i->count;
iterate_all_kinds(i, size, v,
(res |= (unsigned long)v.iov_base | v.iov_len, 0),
res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len
+ res |= (unsigned long)v.iov_base | v.iov_len,
+ res |= v.bv_offset | v.bv_len
)
return res;
}
@@ -1282,7 +1377,9 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
(size != v.bv_len ? size : 0)),
(res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0))
+ (size != v.iov_len ? size : 0)),
+ (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+ (size != v.bv_len ? size : 0))
);
return res;
}
@@ -1332,6 +1429,75 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}
+static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
+ pgoff_t index, unsigned int nr_pages)
+{
+ XA_STATE(xas, xa, index);
+ struct page *page;
+ unsigned int ret = 0;
+
+ rcu_read_lock();
+ for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ /* Has the page moved or been split? */
+ if (unlikely(page != xas_reload(&xas))) {
+ xas_reset(&xas);
+ continue;
+ }
+
+ pages[ret] = find_subpage(page, xas.xa_index);
+ get_page(pages[ret]);
+ if (++ret == nr_pages)
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static ssize_t iter_xarray_get_pages(struct iov_iter *i,
+ struct page **pages, size_t maxsize,
+ unsigned maxpages, size_t *_start_offset)
+{
+ unsigned nr, offset;
+ pgoff_t index, count;
+ size_t size = maxsize, actual;
+ loff_t pos;
+
+ if (!size || !maxpages)
+ return 0;
+
+ pos = i->xarray_start + i->iov_offset;
+ index = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ *_start_offset = offset;
+
+ count = 1;
+ if (size > PAGE_SIZE - offset) {
+ size -= PAGE_SIZE - offset;
+ count += size >> PAGE_SHIFT;
+ size &= ~PAGE_MASK;
+ if (size)
+ count++;
+ }
+
+ if (count > maxpages)
+ count = maxpages;
+
+ nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
+ if (nr == 0)
+ return 0;
+
+ actual = PAGE_SIZE * nr;
+ actual -= offset;
+ if (nr == count && size > 0) {
+ unsigned last_offset = (nr > 1) ? 0 : offset;
+ actual -= PAGE_SIZE - (last_offset + size);
+ }
+ return actual;
+}
+
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
@@ -1341,6 +1507,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
+ if (unlikely(iov_iter_is_xarray(i)))
+ return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
if (unlikely(iov_iter_is_discard(i)))
return -EFAULT;
@@ -1367,7 +1535,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
return v.bv_len;
}),({
return -EFAULT;
- })
+ }),
+ 0
)
return 0;
}
@@ -1411,6 +1580,51 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
return n;
}
+static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ size_t *_start_offset)
+{
+ struct page **p;
+ unsigned nr, offset;
+ pgoff_t index, count;
+ size_t size = maxsize, actual;
+ loff_t pos;
+
+ if (!size)
+ return 0;
+
+ pos = i->xarray_start + i->iov_offset;
+ index = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ *_start_offset = offset;
+
+ count = 1;
+ if (size > PAGE_SIZE - offset) {
+ size -= PAGE_SIZE - offset;
+ count += size >> PAGE_SHIFT;
+ size &= ~PAGE_MASK;
+ if (size)
+ count++;
+ }
+
+ p = get_pages_array(count);
+ if (!p)
+ return -ENOMEM;
+ *pages = p;
+
+ nr = iter_xarray_populate_pages(p, i->xarray, index, count);
+ if (nr == 0)
+ return 0;
+
+ actual = PAGE_SIZE * nr;
+ actual -= offset;
+ if (nr == count && size > 0) {
+ unsigned last_offset = (nr > 1) ? 0 : offset;
+ actual -= PAGE_SIZE - (last_offset + size);
+ }
+ return actual;
+}
+
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
@@ -1422,6 +1636,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
if (unlikely(iov_iter_is_pipe(i)))
return pipe_get_pages_alloc(i, pages, maxsize, start);
+ if (unlikely(iov_iter_is_xarray(i)))
+ return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
if (unlikely(iov_iter_is_discard(i)))
return -EFAULT;
@@ -1454,7 +1670,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
return v.bv_len;
}),({
return -EFAULT;
- })
+ }), 0
)
return 0;
}
@@ -1492,6 +1708,13 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
v.iov_base, v.iov_len,
sum, off);
off += v.iov_len;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
+ kunmap_atomic(p);
+ off += v.bv_len;
})
)
*csum = sum;
@@ -1533,6 +1756,13 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
v.iov_base, v.iov_len,
sum, off);
off += v.iov_len;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
+ kunmap_atomic(p);
+ off += v.bv_len;
})
)
*csum = sum;
@@ -1579,6 +1809,13 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
(from += v.iov_len) - v.iov_len,
v.iov_len, sum, off);
off += v.iov_len;
+ }), ({
+ char *p = kmap_atomic(v.bv_page);
+ sum = csum_and_memcpy(p + v.bv_offset,
+ (from += v.bv_len) - v.bv_len,
+ v.bv_len, sum, off);
+ kunmap_atomic(p);
+ off += v.bv_len;
})
)
csstate->csum = sum;
@@ -1629,6 +1866,21 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
if (npages >= maxpages)
return maxpages;
+ } else if (unlikely(iov_iter_is_xarray(i))) {
+ unsigned offset;
+
+ offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;
+
+ npages = 1;
+ if (size > PAGE_SIZE - offset) {
+ size -= PAGE_SIZE - offset;
+ npages += size >> PAGE_SHIFT;
+ size &= ~PAGE_MASK;
+ if (size)
+ npages++;
+ }
+ if (npages >= maxpages)
+ return maxpages;
} else iterate_all_kinds(i, size, v, ({
unsigned long p = (unsigned long)v.iov_base;
npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
@@ -1645,7 +1897,8 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
- p / PAGE_SIZE;
if (npages >= maxpages)
return maxpages;
- })
+ }),
+ 0
)
return npages;
}
@@ -1658,7 +1911,7 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
WARN_ON(1);
return NULL;
}
- if (unlikely(iov_iter_is_discard(new)))
+ if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
return NULL;
if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
@@ -1863,7 +2116,12 @@ int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
kunmap(v.bv_page);
err;}), ({
w = v;
- err = f(&w, context);})
+ err = f(&w, context);}), ({
+ w.iov_base = kmap(v.bv_page) + v.bv_offset;
+ w.iov_len = v.bv_len;
+ err = f(&w, context);
+ kunmap(v.bv_page);
+ err;})
)
return err;
}
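
A hedged sketch (not from the patch) of how the new ITER_XARRAY type is used: the iterator walks pages already attached to a mapping, so the caller must keep them referenced or locked, as the iov_iter_xarray() kernel-doc above stresses.

    /* hypothetical helper: inject a kernel buffer into an inode's page cache,
     * assuming the caller already holds references on / locks for those pages */
    static int write_into_pagecache(struct address_space *mapping, loff_t pos,
                                    void *kbuf, size_t len)
    {
            struct iov_iter iter;

            /* READ marks the iterator as the destination of the copy */
            iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
            if (copy_to_iter(kbuf, len, &iter) != len)
                    return -EIO;
            return 0;
    }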
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7998affa45d4..c87d5b6a8a55 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
+ int buffer_size = sizeof(env->buf) - env->buflen;
int len;
- len = strlcpy(&env->buf[env->buflen], subsystem,
- sizeof(env->buf) - env->buflen);
- if (len >= (sizeof(env->buf) - env->buflen)) {
- WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+ len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
+ if (len >= buffer_size) {
+ pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
+ buffer_size, len);
return -ENOMEM;
}
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 52f0c258c895..a926d96ffd44 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -7,16 +7,13 @@
#include <linux/list_sort.h>
#include <linux/list.h>
-typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *,
- struct list_head const *, struct list_head const *);
-
/*
* Returns a list organized in an intermediate format suited
* to chaining of merge() calls: null-terminated, no reserved or
* sentinel head node, "prev" links not maintained.
*/
__attribute__((nonnull(2,3,4)))
-static struct list_head *merge(void *priv, cmp_func cmp,
+static struct list_head *merge(void *priv, list_cmp_func_t cmp,
struct list_head *a, struct list_head *b)
{
struct list_head *head, **tail = &head;
@@ -52,7 +49,7 @@ static struct list_head *merge(void *priv, cmp_func cmp,
* throughout.
*/
__attribute__((nonnull(2,3,4,5)))
-static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
+static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
@@ -185,9 +182,7 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
* 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
*/
__attribute__((nonnull(2,3)))
-void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
{
struct list_head *list = head->next, *pending = NULL;
size_t count = 0; /* Count of pending */
@@ -227,7 +222,7 @@ void list_sort(void *priv, struct list_head *head,
if (likely(bits)) {
struct list_head *a = *tail, *b = a->prev;
- a = merge(priv, (cmp_func)cmp, b, a);
+ a = merge(priv, cmp, b, a);
/* Install the merged result in place of the inputs */
a->prev = b->prev;
*tail = a;
@@ -249,10 +244,10 @@ void list_sort(void *priv, struct list_head *head,
if (!next)
break;
- list = merge(priv, (cmp_func)cmp, pending, list);
+ list = merge(priv, cmp, pending, list);
pending = next;
}
/* The final merge, rebuilding prev links */
- merge_final(priv, (cmp_func)cmp, head, pending, list);
+ merge_final(priv, cmp, head, pending, list);
}
EXPORT_SYMBOL(list_sort);
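
With the cast-based cmp_func gone, callers pass a list_cmp_func_t directly. A minimal illustrative comparator (struct foo and foo_cmp() are made up, and the const struct list_head * arguments assume list_cmp_func_t keeps the constness the old internal cmp_func already had):

    /* hypothetical element type sorted by ->key in ascending order */
    struct foo {
            struct list_head list;
            int key;
    };

    static int foo_cmp(void *priv, const struct list_head *a,
                       const struct list_head *b)
    {
            const struct foo *fa = list_entry(a, struct foo, list);
            const struct foo *fb = list_entry(b, struct foo, list);

            return fa->key - fb->key;       /* <0, 0, >0 like memcmp() */
    }

    /* ... */
    list_sort(NULL, &foo_list, foo_cmp);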
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index f32fe481b492..07b4b9a1f54b 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -28,6 +28,8 @@ static DEFINE_MUTEX(io_range_mutex);
* @new_range: pointer to the IO range to be registered.
*
* Returns 0 on success, the error code in case of failure.
+ * If the range already exists, -EEXIST will be returned, which should be
+ * considered a success.
*
* Register a new IO range node in the IO range list.
*/
@@ -51,6 +53,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
list_for_each_entry(range, &io_range_list, list) {
if (range->fwnode == new_range->fwnode) {
/* range already there */
+ ret = -EEXIST;
goto end_register;
}
if (range->flags == LOGIC_PIO_CPU_MMIO &&
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index c69ee53d8dde..52313acbfa62 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -76,6 +76,7 @@ int lc_try_lock(struct lru_cache *lc)
/**
* lc_create - prepares to track objects in an active set
* @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @cache: cache root pointer
* @max_pending_changes: maximum changes to accumulate until a transaction is required
* @e_count: number of elements allowed to be active simultaneously
* @e_size: size of the tracked objects
@@ -627,7 +628,7 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index)
}
/**
- * lc_dump - Dump a complete LRU cache to seq in textual form.
+ * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
* @lc: the lru cache to operate on
* @seq: the &struct seq_file pointer to seq_printf into
* @utext: user supplied additional "heading" or other info
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 064d68a5391a..46866394fc84 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -232,4 +232,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
return res + div64_u64(a * b, c);
}
+EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
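
The export above makes the full-precision helper usable from modules; a one-line usage sketch (the variable names are illustrative):

    /* compute ticks * NSEC_PER_SEC / freq_hz without overflowing the
     * intermediate 64x64 product */
    u64 ns = mul_u64_u64_div_u64(ticks, NSEC_PER_SEC, freq_hz);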
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index f7ad43f28579..3dfaa836e7c5 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bug.h>
+#include <linux/asn1.h>
#include "oid_registry_data.c"
MODULE_DESCRIPTION("OID Registry");
@@ -92,6 +93,29 @@ enum OID look_up_OID(const void *data, size_t datasize)
}
EXPORT_SYMBOL_GPL(look_up_OID);
+/**
+ * parse_OID - Parse an OID from a bytestream
+ * @data: Binary representation of the header + OID
+ * @datasize: Size of the binary representation
+ * @oid: Pointer to oid to return result
+ *
+ * Parse an OID from a bytestream that holds the OID in the format
+ * ASN1_OID | length | oid. The length indicator must equal datasize - 2.
+ * -EBADMSG is returned if the bytestream is too short.
+ */
+int parse_OID(const void *data, size_t datasize, enum OID *oid)
+{
+ const unsigned char *v = data;
+
+ /* we need 2 bytes of header and at least 1 byte for oid */
+ if (datasize < 3 || v[0] != ASN1_OID || v[1] != datasize - 2)
+ return -EBADMSG;
+
+ *oid = look_up_OID(data + 2, datasize - 2);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(parse_OID);
+
/*
* sprint_OID - Print an Object Identifier into a buffer
* @data: The encoded OID to print
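
A hedged usage sketch of the new helper (the blob encodes OID 2.5.4.3 purely as an example):

    /* sketch: recognise a DER blob of the form ASN1_OID | length | oid */
    static const u8 blob[] = { ASN1_OID, 0x03, 0x55, 0x04, 0x03 };  /* 2.5.4.3 */
    enum OID oid;

    if (parse_OID(blob, sizeof(blob), &oid) == 0 && oid != OID__NR)
            pr_info("matched OID registry entry %d\n", oid);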
diff --git a/lib/parman.c b/lib/parman.c
index a11f2f667639..3f8f8d422e62 100644
--- a/lib/parman.c
+++ b/lib/parman.c
@@ -297,7 +297,7 @@ EXPORT_SYMBOL(parman_destroy);
* parman_prio_init - initializes a parman priority chunk
* @parman: parman instance
* @prio: parman prio structure to be initialized
- * @prority: desired priority of the chunk
+ * @priority: desired priority of the chunk
*
* Note: all locking must be provided by the caller.
*
@@ -356,7 +356,7 @@ int parman_item_add(struct parman *parman, struct parman_prio *prio,
EXPORT_SYMBOL(parman_item_add);
/**
- * parman_item_del - deletes parman item
+ * parman_item_remove - deletes parman item
* @parman: parman instance
* @prio: parman prio instance to delete the item from
* @item: parman item instance
diff --git a/lib/parser.c b/lib/parser.c
index f5b3e5d7a7f9..7a5769db389f 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -11,7 +11,7 @@
#include <linux/string.h>
/**
- * match_one: - Determines if a string matches a simple pattern
+ * match_one - Determines if a string matches a simple pattern
* @s: the string to examine for presence of the pattern
* @p: the string containing the pattern
* @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
@@ -89,7 +89,7 @@ static int match_one(char *s, const char *p, substring_t args[])
}
/**
- * match_token: - Find a token (and optional args) in a string
+ * match_token - Find a token (and optional args) in a string
* @s: the string to examine for token/argument pairs
* @table: match_table_t describing the set of allowed option tokens and the
* arguments that may be associated with them. Must be terminated with a
@@ -114,7 +114,7 @@ int match_token(char *s, const match_table_t table, substring_t args[])
EXPORT_SYMBOL(match_token);
/**
- * match_number: scan a number in the given base from a substring_t
+ * match_number - scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting integer on success
* @base: base to use when converting string
@@ -147,7 +147,7 @@ static int match_number(substring_t *s, int *result, int base)
}
/**
- * match_u64int: scan a number in the given base from a substring_t
+ * match_u64int - scan a number in the given base from a substring_t
* @s: substring to be scanned
* @result: resulting u64 on success
* @base: base to use when converting string
@@ -174,7 +174,7 @@ static int match_u64int(substring_t *s, u64 *result, int base)
}
/**
- * match_int: - scan a decimal representation of an integer from a substring_t
+ * match_int - scan a decimal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
@@ -188,8 +188,30 @@ int match_int(substring_t *s, int *result)
}
EXPORT_SYMBOL(match_int);
+/**
+ * match_uint - scan a decimal representation of an unsigned int from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting unsigned int on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a decimal integer. On
+ * success, sets @result to the integer represented by the string and returns 0.
+ * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ */
+int match_uint(substring_t *s, unsigned int *result)
+{
+ int err = -ENOMEM;
+ char *buf = match_strdup(s);
+
+ if (buf) {
+ err = kstrtouint(buf, 10, result);
+ kfree(buf);
+ }
+ return err;
+}
+EXPORT_SYMBOL(match_uint);
+
/**
- * match_u64: - scan a decimal representation of a u64 from
+ * match_u64 - scan a decimal representation of a u64 from
* a substring_t
* @s: substring_t to be scanned
* @result: resulting unsigned long long on success
@@ -206,7 +228,7 @@ int match_u64(substring_t *s, u64 *result)
EXPORT_SYMBOL(match_u64);
/**
- * match_octal: - scan an octal representation of an integer from a substring_t
+ * match_octal - scan an octal representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
@@ -221,7 +243,7 @@ int match_octal(substring_t *s, int *result)
EXPORT_SYMBOL(match_octal);
/**
- * match_hex: - scan a hex representation of an integer from a substring_t
+ * match_hex - scan a hex representation of an integer from a substring_t
* @s: substring_t to be scanned
* @result: resulting integer on success
*
@@ -236,7 +258,7 @@ int match_hex(substring_t *s, int *result)
EXPORT_SYMBOL(match_hex);
/**
- * match_wildcard: - parse if a string matches given wildcard pattern
+ * match_wildcard - parse if a string matches given wildcard pattern
* @pattern: wildcard pattern
* @str: the string to be parsed
*
@@ -287,7 +309,7 @@ bool match_wildcard(const char *pattern, const char *str)
EXPORT_SYMBOL(match_wildcard);
/**
- * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * match_strlcpy - Copy the characters from a substring_t to a sized buffer
* @dest: where to copy to
* @src: &substring_t to copy
* @size: size of destination buffer
@@ -310,7 +332,7 @@ size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
EXPORT_SYMBOL(match_strlcpy);
/**
- * match_strdup: - allocate a new string with the contents of a substring_t
+ * match_strdup - allocate a new string with the contents of a substring_t
* @s: &substring_t to copy
*
* Description: Allocates and returns a string filled with the contents of
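
As a usage illustration for the kernel-doc fixes above and the new match_uint(), here is a hedged sketch of a small option parser; the token table, option name, and function names are invented for this example:

	enum { Opt_retries, Opt_err };

	static const match_table_t example_tokens = {
		{ Opt_retries, "retries=%u" },
		{ Opt_err, NULL }
	};

	static int example_parse_opt(char *opt, unsigned int *retries)
	{
		substring_t args[MAX_OPT_ARGS];

		switch (match_token(opt, example_tokens, args)) {
		case Opt_retries:
			/* match_uint() returns 0 on success, -E* on failure. */
			return match_uint(&args[0], retries);
		default:
			return -EINVAL;
		}
	}
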
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3a4da11b804d..b3afafe46fff 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -166,9 +166,9 @@ static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
/**
* radix_tree_find_next_bit - find the next set bit in a memory region
*
- * @addr: The address to base the search on
- * @size: The bitmap size in bits
- * @offset: The bitnumber to start searching at
+ * @node: where to begin the search
+ * @tag: the tag index
+ * @offset: the bitnumber to start searching at
*
* Unrollable variant of find_next_bit() for constant size arrays.
* Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
@@ -461,7 +461,7 @@ out:
/**
* radix_tree_shrink - shrink radix tree to minimum height
- * @root radix tree root
+ * @root: radix tree root
*/
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
@@ -691,7 +691,7 @@ static inline int insert_entries(struct radix_tree_node *node,
}
/**
- * __radix_tree_insert - insert into a radix tree
+ * radix_tree_insert - insert into a radix tree
* @root: radix tree root
* @index: index key
* @item: item to insert
@@ -919,6 +919,7 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
/**
* radix_tree_iter_replace - replace item in a slot
* @root: radix tree root
+ * @iter: iterator state
* @slot: pointer to slot
* @item: new item to store in the slot.
*
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 890dcc2e984e..49f67a0c6e5d 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -31,6 +31,7 @@
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/memblock.h>
#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
@@ -141,14 +142,38 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
return stack;
}
-#define STACK_HASH_ORDER 20
-#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
-static struct stack_record *stack_table[STACK_HASH_SIZE] = {
- [0 ... STACK_HASH_SIZE - 1] = NULL
-};
+static bool stack_depot_disable;
+static struct stack_record **stack_table;
+
+static int __init is_stack_depot_disabled(char *str)
+{
+ int ret;
+
+ ret = kstrtobool(str, &stack_depot_disable);
+ if (!ret && stack_depot_disable) {
+ pr_info("Stack Depot is disabled\n");
+ stack_table = NULL;
+ }
+ return 0;
+}
+early_param("stack_depot_disable", is_stack_depot_disabled);
+
+int __init stack_depot_init(void)
+{
+ if (!stack_depot_disable) {
+ size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+ int i;
+
+ stack_table = memblock_alloc(size, size);
+ for (i = 0; i < STACK_HASH_SIZE; i++)
+ stack_table[i] = NULL;
+ }
+ return 0;
+}
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
@@ -242,7 +267,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned long flags;
u32 hash;
- if (unlikely(nr_entries == 0))
+ if (unlikely(nr_entries == 0) || stack_depot_disable)
goto fast_exit;
hash = hash_stack(entries, nr_entries);
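
To show what the now boot-time-allocated hash table serves, a hedged sketch of a typical stack depot round trip; with stack_depot_disable set (or on allocation failure) stack_depot_save() is expected to return a zero handle, which callers should treat as "no stack recorded":

	static void example_record_stack(void)
	{
		unsigned long entries[16];
		unsigned long *saved;
		unsigned int nr, saved_nr;
		depot_stack_handle_t handle;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
		handle = stack_depot_save(entries, nr, GFP_KERNEL);
		if (!handle)
			return;		/* depot disabled or out of memory */

		saved_nr = stack_depot_fetch(handle, &saved);
		stack_trace_print(saved, saved_nr, 0);
	}
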
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 2947274cc2d3..785e724ce0d8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -28,10 +29,9 @@
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
/*
- * We assign some test results to these globals to make sure the tests
- * are not eliminated as dead code.
+ * Some tests use these global variables to store return values from function
+ * calls that could otherwise be eliminated by the compiler as dead code.
*/
-
void *kasan_ptr_result;
int kasan_int_result;
@@ -39,40 +39,86 @@ static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;
+/*
+ * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
+ * first detected bug and panic the kernel if panic_on_warn is enabled. For
+ * hardware tag-based KASAN, also allow tag checking to be reenabled for each
+ * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
+ */
static int kasan_test_init(struct kunit *test)
{
- /*
- * Temporarily enable multi-shot mode and set panic_on_warn=0.
- * Otherwise, we'd only get a report for the first case.
- */
- multishot = kasan_save_enable_multi_shot();
+ if (!kasan_enabled()) {
+ kunit_err(test, "can't run KASAN tests with KASAN disabled");
+ return -1;
+ }
+ multishot = kasan_save_enable_multi_shot();
+ kasan_set_tagging_report_once(false);
return 0;
}
static void kasan_test_exit(struct kunit *test)
{
+ kasan_set_tagging_report_once(true);
kasan_restore_multi_shot(multishot);
}
/**
- * KUNIT_EXPECT_KASAN_FAIL() - Causes a test failure when the expression does
- * not cause a KASAN error. This uses a KUnit resource named "kasan_data." Do
- * Do not use this name for a KUnit resource outside here.
+ * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
+ * KASAN report; causes a test failure otherwise. This relies on a KUnit
+ * resource named "kasan_data". Do not use this name for KUnit resources
+ * outside of KASAN tests.
+ *
+ * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
+ * checking is auto-disabled. When this happens, this test handler reenables
+ * tag checking. As tag checking can only be disabled or enabled per CPU,
+ * this handler disables migration (preemption).
*
+ * Since the compiler doesn't see that the expression can change the fail_data
+ * fields, it can reorder or optimize away the accesses to those fields.
+ * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
+ * expression to prevent that.
*/
-#define KUNIT_EXPECT_KASAN_FAIL(test, condition) do { \
- fail_data.report_expected = true; \
- fail_data.report_found = false; \
- kunit_add_named_resource(test, \
- NULL, \
- NULL, \
- &resource, \
- "kasan_data", &fail_data); \
- condition; \
- KUNIT_EXPECT_EQ(test, \
- fail_data.report_expected, \
- fail_data.report_found); \
+#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
+ !kasan_async_mode_enabled()) \
+ migrate_disable(); \
+ WRITE_ONCE(fail_data.report_expected, true); \
+ WRITE_ONCE(fail_data.report_found, false); \
+ kunit_add_named_resource(test, \
+ NULL, \
+ NULL, \
+ &resource, \
+ "kasan_data", &fail_data); \
+ barrier(); \
+ expression; \
+ barrier(); \
+ if (kasan_async_mode_enabled()) \
+ kasan_force_async_fault(); \
+ barrier(); \
+ KUNIT_EXPECT_EQ(test, \
+ READ_ONCE(fail_data.report_expected), \
+ READ_ONCE(fail_data.report_found)); \
+ if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
+ !kasan_async_mode_enabled()) { \
+ if (READ_ONCE(fail_data.report_found)) \
+ kasan_enable_tagging_sync(); \
+ migrate_enable(); \
+ } \
+} while (0)
+
+#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
+ if (!IS_ENABLED(config)) { \
+ kunit_info((test), "skipping, " #config " required"); \
+ return; \
+ } \
+} while (0)
+
+#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
+ if (IS_ENABLED(config)) { \
+ kunit_info((test), "skipping, " #config " enabled"); \
+ return; \
+ } \
} while (0)
static void kmalloc_oob_right(struct kunit *test)
@@ -111,23 +157,24 @@ static void kmalloc_node_oob_right(struct kunit *test)
kfree(ptr);
}
+/*
+ * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
+ * fit into a slab cache and therefore is allocated via the page allocator
+ * fallback. Since this kind of fallback is only implemented for SLUB, these
+ * tests are limited to that allocator.
+ */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
- /* Allocate a chunk that does not fit into a SLUB cache to trigger
- * the page allocator fallback.
- */
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
+
kfree(ptr);
}
@@ -136,15 +183,12 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
kfree(ptr);
+
KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}
@@ -153,10 +197,7 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- if (!IS_ENABLED(CONFIG_SLUB)) {
- kunit_info(test, "CONFIG_SLUB is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -164,11 +205,49 @@ static void kmalloc_pagealloc_invalid_free(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
+static void pagealloc_oob_right(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+ size_t size = (1UL << (PAGE_SHIFT + order));
+
+ /*
+	 * With generic KASAN, page allocations have no redzones, thus
+ * out-of-bounds detection is not guaranteed.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ free_pages((unsigned long)ptr, order);
+}
+
+static void pagealloc_uaf(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ size_t order = 4;
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ free_pages((unsigned long)ptr, order);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+}
+
static void kmalloc_large_oob_right(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
- /* Allocate a chunk that is large enough, but still fits into a slab
+
+ /*
+ * Allocate a chunk that is large enough, but still fits into a slab
* and does not trigger the page allocator fallback in SLUB.
*/
ptr = kmalloc(size, GFP_KERNEL);
@@ -178,11 +257,14 @@ static void kmalloc_large_oob_right(struct kunit *test)
kfree(ptr);
}
-static void kmalloc_oob_krealloc_more(struct kunit *test)
+static void krealloc_more_oob_helper(struct kunit *test,
+ size_t size1, size_t size2)
{
char *ptr1, *ptr2;
- size_t size1 = 17;
- size_t size2 = 19;
+ size_t middle;
+
+ KUNIT_ASSERT_LT(test, size1, size2);
+ middle = size1 + (size2 - size1) / 2;
ptr1 = kmalloc(size1, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -190,15 +272,31 @@ static void kmalloc_oob_krealloc_more(struct kunit *test)
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
+ /* All offsets up to size2 must be accessible. */
+ ptr2[size1 - 1] = 'x';
+ ptr2[size1] = 'x';
+ ptr2[middle] = 'x';
+ ptr2[size2 - 1] = 'x';
+
+ /* Generic mode is precise, so unaligned size2 must be inaccessible. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
+
+	/* For all modes, the first aligned offset after size2 must be inaccessible. */
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
+
kfree(ptr2);
}
-static void kmalloc_oob_krealloc_less(struct kunit *test)
+static void krealloc_less_oob_helper(struct kunit *test,
+ size_t size1, size_t size2)
{
char *ptr1, *ptr2;
- size_t size1 = 17;
- size_t size2 = 15;
+ size_t middle;
+
+ KUNIT_ASSERT_LT(test, size2, size1);
+ middle = size2 + (size1 - size2) / 2;
ptr1 = kmalloc(size1, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -206,10 +304,79 @@ static void kmalloc_oob_krealloc_less(struct kunit *test)
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
+ /* Must be accessible for all modes. */
+ ptr2[size2 - 1] = 'x';
+
+ /* Generic mode is precise, so unaligned size2 must be inaccessible. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
+
+	/* For all modes, the first aligned offset after size2 must be inaccessible. */
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
+
+ /*
+	 * For all modes, size2, middle, and size1 should land in separate
+	 * granules, and thus the latter two offsets should be inaccessible.
+ */
+ KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
+ round_down(middle, KASAN_GRANULE_SIZE));
+ KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
+ round_down(size1, KASAN_GRANULE_SIZE));
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
+
kfree(ptr2);
}
+static void krealloc_more_oob(struct kunit *test)
+{
+ krealloc_more_oob_helper(test, 201, 235);
+}
+
+static void krealloc_less_oob(struct kunit *test)
+{
+ krealloc_less_oob_helper(test, 235, 201);
+}
+
+static void krealloc_pagealloc_more_oob(struct kunit *test)
+{
+	/* The page_alloc fallback is only implemented for SLUB. */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
+
+ krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
+ KMALLOC_MAX_CACHE_SIZE + 235);
+}
+
+static void krealloc_pagealloc_less_oob(struct kunit *test)
+{
+	/* The page_alloc fallback is only implemented for SLUB. */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
+
+ krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
+ KMALLOC_MAX_CACHE_SIZE + 201);
+}
+
+/*
+ * Check that krealloc() detects a use-after-free, returns NULL,
+ * and doesn't unpoison the freed object.
+ */
+static void krealloc_uaf(struct kunit *test)
+{
+ char *ptr1, *ptr2;
+ int size1 = 201;
+ int size2 = 235;
+
+ ptr1 = kmalloc(size1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+ kfree(ptr1);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
+ KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
+ KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
+}
+
static void kmalloc_oob_16(struct kunit *test)
{
struct {
@@ -217,10 +384,7 @@ static void kmalloc_oob_16(struct kunit *test)
} *ptr1, *ptr2;
/* This test is specifically crafted for the generic mode. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -355,7 +519,9 @@ static void kmalloc_uaf2(struct kunit *test)
{
char *ptr1, *ptr2;
size_t size = 43;
+ int counter = 0;
+again:
ptr1 = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -364,6 +530,15 @@ static void kmalloc_uaf2(struct kunit *test)
ptr2 = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ /*
+	 * For tag-based KASAN, ptr1 and ptr2 tags might happen to be the same.
+ * Allow up to 16 attempts at generating different tags.
+ */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
+ kfree(ptr2);
+ goto again;
+ }
+
KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
@@ -402,10 +577,11 @@ static void kmem_cache_oob(struct kunit *test)
{
char *p;
size_t size = 200;
- struct kmem_cache *cache = kmem_cache_create("test_cache",
- size, 0,
- 0, NULL);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
p = kmem_cache_alloc(cache, GFP_KERNEL);
if (!p) {
kunit_err(test, "Allocation failed: %s\n", __func__);
@@ -414,11 +590,12 @@ static void kmem_cache_oob(struct kunit *test)
}
KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
+
kmem_cache_free(cache, p);
kmem_cache_destroy(cache);
}
-static void memcg_accounted_kmem_cache(struct kunit *test)
+static void kmem_cache_accounted(struct kunit *test)
{
int i;
char *p;
@@ -445,6 +622,31 @@ free_cache:
kmem_cache_destroy(cache);
}
+static void kmem_cache_bulk(struct kunit *test)
+{
+ struct kmem_cache *cache;
+ size_t size = 200;
+ char *p[10];
+ bool ret;
+ int i;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
+ if (!ret) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(p); i++)
+ p[i][0] = p[i][size - 1] = 42;
+
+ kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
+ kmem_cache_destroy(cache);
+}
+
static char global_array[10];
static void kasan_global_oob(struct kunit *test)
@@ -453,14 +655,12 @@ static void kasan_global_oob(struct kunit *test)
char *p = &global_array[ARRAY_SIZE(global_array) + i];
/* Only generic mode instruments globals. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
+/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
char *ptr;
@@ -469,11 +669,32 @@ static void ksize_unpoisons_memory(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
real_size = ksize(ptr);
- /* This access doesn't trigger an error. */
+
+ /* This access shouldn't trigger a KASAN report. */
ptr[size] = 'x';
- /* This one does. */
+
+ /* This one must. */
KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
+
+ kfree(ptr);
+}
+
+/*
+ * Check that a use-after-free is detected by ksize() and via normal accesses
+ * after it.
+ */
+static void ksize_uaf(struct kunit *test)
+{
+ char *ptr;
+ int size = 128 - KASAN_GRANULE_SIZE;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}
static void kasan_stack_oob(struct kunit *test)
@@ -482,10 +703,7 @@ static void kasan_stack_oob(struct kunit *test)
volatile int i = OOB_TAG_OFF;
char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -497,15 +715,8 @@ static void kasan_alloca_oob_left(struct kunit *test)
char *p = alloca_array - 1;
/* Only generic mode instruments dynamic allocas. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
-
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -517,15 +728,8 @@ static void kasan_alloca_oob_right(struct kunit *test)
char *p = alloca_array + i;
/* Only generic mode instruments dynamic allocas. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required");
- return;
- }
-
- if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
- kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -568,7 +772,7 @@ static void kmem_cache_invalid_free(struct kunit *test)
return;
}
- /* Trigger invalid free, the object doesn't get freed */
+ /* Trigger invalid free, the object doesn't get freed. */
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
/*
@@ -585,12 +789,11 @@ static void kasan_memchr(struct kunit *test)
char *ptr;
size_t size = 24;
- /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ /*
+ * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
@@ -610,12 +813,11 @@ static void kasan_memcmp(struct kunit *test)
size_t size = 24;
int arr[9];
- /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ /*
+ * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
@@ -634,12 +836,11 @@ static void kasan_strings(struct kunit *test)
char *ptr;
size_t size = 24;
- /* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
- kunit_info(test,
- "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
- return;
- }
+ /*
+ * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -700,13 +901,10 @@ static void kasan_bitops_generic(struct kunit *test)
long *bits;
/* This test is specifically crafted for the generic mode. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
/*
- * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
+ * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
* this way we do not actually corrupt other memory.
*/
bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
@@ -731,19 +929,16 @@ static void kasan_bitops_tags(struct kunit *test)
{
long *bits;
- /* This test is specifically crafted for the tag-based mode. */
- if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
- return;
- }
+ /* This test is specifically crafted for tag-based modes. */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
- /* Allocation size will be rounded to up granule size, which is 16. */
- bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+ /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
+ bits = kzalloc(48, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
- /* Do the accesses past the 16 allocated bytes. */
- kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
- kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
+	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
+ kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
+ kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
kfree(bits);
}
@@ -764,10 +959,7 @@ static void vmalloc_oob(struct kunit *test)
{
void *area;
- if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- kunit_info(test, "CONFIG_KASAN_VMALLOC is not enabled.");
- return;
- }
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
/*
* We have to be careful not to hit the guard page.
@@ -780,6 +972,94 @@ static void vmalloc_oob(struct kunit *test)
vfree(area);
}
+/*
+ * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
+ * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
+ * modes.
+ */
+static void match_all_not_assigned(struct kunit *test)
+{
+ char *ptr;
+ struct page *pages;
+ int i, size, order;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ for (i = 0; i < 256; i++) {
+ size = (get_random_int() % 1024) + 1;
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+ kfree(ptr);
+ }
+
+ for (i = 0; i < 256; i++) {
+ order = (get_random_int() % 4) + 1;
+ pages = alloc_pages(GFP_KERNEL, order);
+ ptr = page_address(pages);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+ KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+ free_pages((unsigned long)ptr, order);
+ }
+}
+
+/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
+static void match_all_ptr_tag(struct kunit *test)
+{
+ char *ptr;
+ u8 tag;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ ptr = kmalloc(128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	/* Back up the assigned tag. */
+ tag = get_tag(ptr);
+ KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
+
+	/* Reset the tag to 0xff. */
+ ptr = set_tag(ptr, KASAN_TAG_KERNEL);
+
+ /* This access shouldn't trigger a KASAN report. */
+ *ptr = 0;
+
+ /* Recover the pointer tag and free. */
+ ptr = set_tag(ptr, tag);
+ kfree(ptr);
+}
+
+/* Check that there are no match-all memory tags for tag-based modes. */
+static void match_all_mem_tag(struct kunit *test)
+{
+ char *ptr;
+ int tag;
+
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ ptr = kmalloc(128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
+
+ /* For each possible tag value not matching the pointer tag. */
+ for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
+ if (tag == get_tag(ptr))
+ continue;
+
+ /* Mark the first memory granule with the chosen memory tag. */
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);
+
+ /* This access must cause a KASAN report. */
+ KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
+ }
+
+ /* Recover the memory tag and free. */
+ kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
+ kfree(ptr);
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
@@ -787,9 +1067,14 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_pagealloc_oob_right),
KUNIT_CASE(kmalloc_pagealloc_uaf),
KUNIT_CASE(kmalloc_pagealloc_invalid_free),
+ KUNIT_CASE(pagealloc_oob_right),
+ KUNIT_CASE(pagealloc_uaf),
KUNIT_CASE(kmalloc_large_oob_right),
- KUNIT_CASE(kmalloc_oob_krealloc_more),
- KUNIT_CASE(kmalloc_oob_krealloc_less),
+ KUNIT_CASE(krealloc_more_oob),
+ KUNIT_CASE(krealloc_less_oob),
+ KUNIT_CASE(krealloc_pagealloc_more_oob),
+ KUNIT_CASE(krealloc_pagealloc_less_oob),
+ KUNIT_CASE(krealloc_uaf),
KUNIT_CASE(kmalloc_oob_16),
KUNIT_CASE(kmalloc_uaf_16),
KUNIT_CASE(kmalloc_oob_in_memset),
@@ -804,12 +1089,14 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kfree_via_page),
KUNIT_CASE(kfree_via_phys),
KUNIT_CASE(kmem_cache_oob),
- KUNIT_CASE(memcg_accounted_kmem_cache),
+ KUNIT_CASE(kmem_cache_accounted),
+ KUNIT_CASE(kmem_cache_bulk),
KUNIT_CASE(kasan_global_oob),
KUNIT_CASE(kasan_stack_oob),
KUNIT_CASE(kasan_alloca_oob_left),
KUNIT_CASE(kasan_alloca_oob_right),
KUNIT_CASE(ksize_unpoisons_memory),
+ KUNIT_CASE(ksize_uaf),
KUNIT_CASE(kmem_cache_double_free),
KUNIT_CASE(kmem_cache_invalid_free),
KUNIT_CASE(kasan_memchr),
@@ -819,6 +1106,9 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree),
KUNIT_CASE(vmalloc_oob),
+ KUNIT_CASE(match_all_not_assigned),
+ KUNIT_CASE(match_all_ptr_tag),
+ KUNIT_CASE(match_all_mem_tag),
{}
};
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index 3b4cc77992d2..f1017f345d6c 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -22,7 +22,7 @@ static noinline void __init copy_user_test(void)
char *kmem;
char __user *usermem;
size_t size = 10;
- int unused;
+ int __maybe_unused unused;
kmem = kmalloc(size, GFP_KERNEL);
if (!kmem)
@@ -123,8 +123,9 @@ static noinline void __init kasan_workqueue_uaf(void)
static int __init test_kasan_module_init(void)
{
/*
- * Temporarily enable multi-shot mode. Otherwise, we'd only get a
- * report for the first case.
+ * Temporarily enable multi-shot mode. Otherwise, KASAN would only
+ * report the first detected bug and panic the kernel if panic_on_warn
+ * is enabled.
*/
bool multishot = kasan_save_enable_multi_shot();
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 1f017d3b610e..00daaf23316f 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -56,7 +56,8 @@ static int __init check(struct debug_el *ela, struct debug_el *elb)
return 0;
}
-static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+static int __init cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
{
struct debug_el *ela, *elb;
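
A brief sketch of what the const-qualified comparison prototype looks like from a list_sort() caller's side; the structure and function names are invented for illustration:

	struct example_item {
		struct list_head node;
		int key;
	};

	/* Comparison callback using the new const-qualified prototype. */
	static int example_cmp(void *priv, const struct list_head *a,
			       const struct list_head *b)
	{
		const struct example_item *ia = list_entry(a, struct example_item, node);
		const struct example_item *ib = list_entry(b, struct example_item, node);

		return ia->key - ib->key;
	}

	/* Typical call site: list_sort(NULL, &example_list, example_cmp); */
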
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 5e5d9355ef49..7e7bbd0f3fd2 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -11,51 +11,6 @@ typedef void(*test_ubsan_fp)(void);
#config, IS_ENABLED(config) ? "y" : "n"); \
} while (0)
-static void test_ubsan_add_overflow(void)
-{
- volatile int val = INT_MAX;
- volatile unsigned int uval = UINT_MAX;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val += 2;
-
- UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
- uval += 2;
-}
-
-static void test_ubsan_sub_overflow(void)
-{
- volatile int val = INT_MIN;
- volatile unsigned int uval = 0;
- volatile int val2 = 2;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val -= val2;
-
- UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
- uval -= val2;
-}
-
-static void test_ubsan_mul_overflow(void)
-{
- volatile int val = INT_MAX / 2;
- volatile unsigned int uval = UINT_MAX / 2;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val *= 3;
-
- UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
- uval *= 3;
-}
-
-static void test_ubsan_negate_overflow(void)
-{
- volatile int val = INT_MIN;
-
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
- val = -val;
-}
-
static void test_ubsan_divrem_overflow(void)
{
volatile int val = 16;
@@ -155,10 +110,6 @@ static void test_ubsan_object_size_mismatch(void)
}
static const test_ubsan_fp test_ubsan_array[] = {
- test_ubsan_add_overflow,
- test_ubsan_sub_overflow,
- test_ubsan_mul_overflow,
- test_ubsan_negate_overflow,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 8294f43f4981..8b1c318189ce 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
#ifdef CONFIG_XARRAY_MULTI
static void check_split_1(struct xarray *xa, unsigned long index,
- unsigned int order)
+ unsigned int order, unsigned int new_order)
{
- XA_STATE(xas, xa, index);
- void *entry;
- unsigned int i = 0;
+ XA_STATE_ORDER(xas, xa, index, new_order);
+ unsigned int i;
xa_store_order(xa, index, order, xa, GFP_KERNEL);
xas_split_alloc(&xas, xa, order, GFP_KERNEL);
xas_lock(&xas);
xas_split(&xas, xa, order);
+ for (i = 0; i < (1 << order); i += (1 << new_order))
+ __xa_store(xa, index + i, xa_mk_index(index + i), 0);
xas_unlock(&xas);
- xa_for_each(xa, index, entry) {
- XA_BUG_ON(xa, entry != xa);
- i++;
+ for (i = 0; i < (1 << order); i++) {
+ unsigned int val = index + (i & ~((1 << new_order) - 1));
+ XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
}
- XA_BUG_ON(xa, i != 1 << order);
xa_set_mark(xa, index, XA_MARK_0);
XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
static noinline void check_split(struct xarray *xa)
{
- unsigned int order;
+ unsigned int order, new_order;
XA_BUG_ON(xa, !xa_empty(xa));
for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
- check_split_1(xa, 0, order);
- check_split_1(xa, 1UL << order, order);
- check_split_1(xa, 3UL << order, order);
+ for (new_order = 0; new_order < order; new_order++) {
+ check_split_1(xa, 0, order, new_order);
+ check_split_1(xa, 1UL << order, order, new_order);
+ check_split_1(xa, 3UL << order, order, new_order);
+ }
}
}
#else
diff --git a/lib/ubsan.c b/lib/ubsan.c
index bec38c64d6a6..26229973049d 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -163,74 +163,6 @@ static void ubsan_epilogue(void)
}
}
-static void handle_overflow(struct overflow_data *data, void *lhs,
- void *rhs, char op)
-{
-
- struct type_descriptor *type = data->type;
- char lhs_val_str[VALUE_LENGTH];
- char rhs_val_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, type_is_signed(type) ?
- "signed-integer-overflow" :
- "unsigned-integer-overflow");
-
- val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
- val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
- pr_err("%s %c %s cannot be represented in type %s\n",
- lhs_val_str,
- op,
- rhs_val_str,
- type->type_name);
-
- ubsan_epilogue();
-}
-
-void __ubsan_handle_add_overflow(void *data,
- void *lhs, void *rhs)
-{
-
- handle_overflow(data, lhs, rhs, '+');
-}
-EXPORT_SYMBOL(__ubsan_handle_add_overflow);
-
-void __ubsan_handle_sub_overflow(void *data,
- void *lhs, void *rhs)
-{
- handle_overflow(data, lhs, rhs, '-');
-}
-EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
-
-void __ubsan_handle_mul_overflow(void *data,
- void *lhs, void *rhs)
-{
- handle_overflow(data, lhs, rhs, '*');
-}
-EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
-
-void __ubsan_handle_negate_overflow(void *_data, void *old_val)
-{
- struct overflow_data *data = _data;
- char old_val_str[VALUE_LENGTH];
-
- if (suppress_report(&data->location))
- return;
-
- ubsan_prologue(&data->location, "negation-overflow");
-
- val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
-
- pr_err("negation of %s cannot be represented in type %s:\n",
- old_val_str, data->type->type_name);
-
- ubsan_epilogue();
-}
-EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
-
-
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
struct overflow_data *data = _data;
diff --git a/lib/xarray.c b/lib/xarray.c
index 5fa51614802a..f5d8f54907b4 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -987,7 +987,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
* xas_split_alloc() - Allocate memory for splitting an entry.
* @xas: XArray operation state.
* @entry: New entry which will be stored in the array.
- * @order: New entry order.
+ * @order: Current entry order.
* @gfp: Memory allocation flags.
*
* This function should be called before calling xas_split().
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
do {
unsigned int i;
- void *sibling;
+ void *sibling = NULL;
struct xa_node *node;
node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
for (i = 0; i < XA_CHUNK_SIZE; i++) {
if ((i & mask) == 0) {
RCU_INIT_POINTER(node->slots[i], entry);
- sibling = xa_mk_sibling(0);
+ sibling = xa_mk_sibling(i);
} else {
RCU_INIT_POINTER(node->slots[i], sibling);
}
@@ -1041,9 +1041,10 @@ EXPORT_SYMBOL_GPL(xas_split_alloc);
* xas_split() - Split a multi-index entry into smaller entries.
* @xas: XArray operation state.
* @entry: New entry to store in the array.
- * @order: New entry order.
+ * @order: Current entry order.
*
- * The value in the entry is copied to all the replacement entries.
+ * The size of the new entries is set in @xas. The value in @entry is
+ * copied to all the replacement entries.
*
* Context: Any context. The caller should hold the xa_lock.
*/