Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                    |  36
-rw-r--r--  lib/Makefile                         |   1
-rw-r--r--  lib/bcd.c                            |   4
-rw-r--r--  lib/buildid.c                        | 397
-rw-r--r--  lib/checksum_kunit.c                 |   9
-rw-r--r--  lib/closure.c                        |   2
-rw-r--r--  lib/decompress_unxz.c                |  40
-rw-r--r--  lib/dim/Makefile                     |   2
-rw-r--r--  lib/dump_stack.c                     |   1
-rw-r--r--  lib/dynamic_debug.c                  |   4
-rw-r--r--  lib/fault-inject.c                   |   1
-rw-r--r--  lib/fortify_kunit.c                  |   3
-rw-r--r--  lib/generic-radix-tree.c             |  80
-rw-r--r--  lib/glob.c                           |   2
-rw-r--r--  lib/kunit/Makefile                   |   4
-rw-r--r--  lib/kunit/platform-test.c            | 224
-rw-r--r--  lib/kunit/platform.c                 | 302
-rw-r--r--  lib/list-test.c                      |   4
-rw-r--r--  lib/lru_cache.c                      |  10
-rw-r--r--  lib/lz4/lz4hc_compress.c             |   1
-rw-r--r--  lib/maple_tree.c                     | 805
-rw-r--r--  lib/math/Makefile                    |   1
-rw-r--r--  lib/math/div64.c                     | 115
-rw-r--r--  lib/math/test_mul_u64_u64_div_u64.c  |  99
-rw-r--r--  lib/percpu_counter.c                 |   2
-rw-r--r--  lib/rhashtable.c                     |   2
-rw-r--r--  lib/sbitmap.c                        |   4
-rw-r--r--  lib/strncpy_from_user.c              |   9
-rw-r--r--  lib/strnlen_user.c                   |   9
-rw-r--r--  lib/test_bits.c                      |  34
-rw-r--r--  lib/test_fpu_glue.c                  |   2
-rw-r--r--  lib/test_hmm.c                       |   5
-rw-r--r--  lib/test_objpool.c                   |   3
-rw-r--r--  lib/test_printf.c                    |  26
-rw-r--r--  lib/vsprintf.c                       |  21
-rw-r--r--  lib/xz/Kconfig                       |  13
-rw-r--r--  lib/xz/xz_crc32.c                    |  11
-rw-r--r--  lib/xz/xz_dec_bcj.c                  | 191
-rw-r--r--  lib/xz/xz_dec_lzma2.c                |  15
-rw-r--r--  lib/xz/xz_dec_stream.c               |  13
-rw-r--r--  lib/xz/xz_dec_syms.c                 |  14
-rw-r--r--  lib/xz/xz_dec_test.c                 |  12
-rw-r--r--  lib/xz/xz_lzma2.h                    |   5
-rw-r--r--  lib/xz/xz_private.h                  |  40
-rw-r--r--  lib/xz/xz_stream.h                   |   5
-rw-r--r--  lib/zstd/compress/zstd_compress.c    |   2
-rw-r--r--  lib/zstd/zstd_compress_module.c      |  49
-rw-r--r--  lib/zstd/zstd_decompress_module.c    |  36
48 files changed, 1890 insertions(+), 780 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 38e58f74fd2b..7315f643817a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -379,13 +379,15 @@ config DEBUG_INFO_BTF
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
- depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+ depends on PAHOLE_VERSION >= 116
+ depends on DEBUG_INFO_DWARF4 || PAHOLE_VERSION >= 121
# pahole uses elfutils, which does not have support for Hexagon relocations
depends on !HEXAGON
help
Generate deduplicated BTF type information from DWARF debug info.
- Turning this on expects presence of pahole tool, which will convert
- DWARF type info into equivalent deduplicated BTF type info.
+ Turning this on requires pahole v1.16 or later (v1.21 or later to
+ support DWARF 5), which will convert DWARF type info into equivalent
+ deduplicated BTF type info.
config PAHOLE_HAS_SPLIT_BTF
def_bool PAHOLE_VERSION >= 119
@@ -571,6 +573,21 @@ config VMLINUX_MAP
pieces of code get eliminated with
CONFIG_LD_DEAD_CODE_DATA_ELIMINATION.
+config BUILTIN_MODULE_RANGES
+ bool "Generate address range information for builtin modules"
+ depends on !LTO
+ depends on VMLINUX_MAP
+ help
+ When modules are built into the kernel, there will be no module name
+ associated with their symbols in /proc/kallsyms. Tracers may want to
+ identify symbols by module name and symbol name regardless of whether
+ the module is configured as loadable or not.
+
+ This option generates modules.builtin.ranges in the build tree,
+ listing the offset ranges (per ELF section) of the code belonging
+ to each built-in module.
+ It also records an anchor symbol to determine the load address of the
+ section.
+
config DEBUG_FORCE_WEAK_PER_CPU
bool "Force weak per-cpu definitions"
depends on DEBUG_KERNEL
@@ -1515,7 +1532,7 @@ config LOCKDEP_BITS
config LOCKDEP_CHAINS_BITS
int "Bitsize for MAX_LOCKDEP_CHAINS"
depends on LOCKDEP && !LOCKDEP_SMALL
- range 10 30
+ range 10 21
default 16
help
Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
@@ -2289,6 +2306,16 @@ config TEST_DIV64
If unsure, say N.
+config TEST_MULDIV64
+ tristate "mul_u64_u64_div_u64() test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on the 'mul_u64_u64_div_u64()' function test.
+ This test is executed only once during system boot (so affects
+ only boot time), or at module load time.
+
+ If unsure, say N.
+
config TEST_IOV_ITER
tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2625,6 +2652,7 @@ config RESOURCE_KUNIT_TEST
tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
+ select GET_FREE_REGION
help
This builds the resource API unit test.
Tests the logic of the API provided by resource.c and ioport.h.
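
For context on the TEST_MULDIV64 entry added above: mul_u64_u64_div_u64() computes (a * b) / c using a full 128-bit intermediate product, so it stays exact where a plain 64-bit multiplication would overflow. A minimal user-space reference model of those semantics (a sketch assuming a compiler with unsigned __int128, not the kernel's implementation in lib/math/div64.c):

    #include <stdint.h>

    /* Reference: (a * b) / c with a 128-bit intermediate; c must be non-zero. */
    static uint64_t mul_u64_u64_div_u64_ref(uint64_t a, uint64_t b, uint64_t c)
    {
            unsigned __int128 prod = (unsigned __int128)a * b;

            return (uint64_t)(prod / c);
    }
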
diff --git a/lib/Makefile b/lib/Makefile
index 2f1c5a9277af..773adf88af41 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -14,6 +14,7 @@ KCOV_INSTRUMENT_list_debug.o := n
KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
KCOV_INSTRUMENT_fault-inject.o := n
+KCOV_INSTRUMENT_find_bit.o := n
# string.o implements standard library functions like memset/memcpy etc.
# Use -ffreestanding to ensure that the compiler does not try to "optimize"
diff --git a/lib/bcd.c b/lib/bcd.c
index 7e4750b6e801..c5e79ba9cd7b 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -10,6 +10,8 @@ EXPORT_SYMBOL(_bcd2bin);
unsigned char _bin2bcd(unsigned val)
{
- return ((val / 10) << 4) + val % 10;
+ const unsigned int t = (val * 103) >> 10;
+
+ return (t << 4) | (val - t * 10);
}
EXPORT_SYMBOL(_bin2bcd);
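
The new _bin2bcd() replaces the division with a multiply-and-shift: for every val in [0, 99], (val * 103) >> 10 equals val / 10, since 103/1024 approximates 1/10 closely enough over that range. A quick user-space check of the identity (a sketch, not kernel code):

    #include <assert.h>

    int main(void)
    {
            for (unsigned int val = 0; val < 100; val++) {
                    unsigned int t = (val * 103) >> 10;

                    /* same quotient, and same packed BCD byte as the old code */
                    assert(t == val / 10);
                    assert(((t << 4) | (val - t * 10)) ==
                           (((val / 10) << 4) + val % 10));
            }
            return 0;
    }
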
diff --git a/lib/buildid.c b/lib/buildid.c
index e02b5507418b..290641d92ac1 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -8,154 +8,302 @@
#define BUILD_ID 3
+#define MAX_PHDR_CNT 256
+
+struct freader {
+ void *buf;
+ u32 buf_sz;
+ int err;
+ union {
+ struct {
+ struct file *file;
+ struct folio *folio;
+ void *addr;
+ loff_t folio_off;
+ bool may_fault;
+ };
+ struct {
+ const char *data;
+ u64 data_sz;
+ };
+ };
+};
+
+static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
+ struct file *file, bool may_fault)
+{
+ memset(r, 0, sizeof(*r));
+ r->buf = buf;
+ r->buf_sz = buf_sz;
+ r->file = file;
+ r->may_fault = may_fault;
+}
+
+static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
+{
+ memset(r, 0, sizeof(*r));
+ r->data = data;
+ r->data_sz = data_sz;
+}
+
+static void freader_put_folio(struct freader *r)
+{
+ if (!r->folio)
+ return;
+ kunmap_local(r->addr);
+ folio_put(r->folio);
+ r->folio = NULL;
+}
+
+static int freader_get_folio(struct freader *r, loff_t file_off)
+{
+ /* check if we can just reuse current folio */
+ if (r->folio && file_off >= r->folio_off &&
+ file_off < r->folio_off + folio_size(r->folio))
+ return 0;
+
+ freader_put_folio(r);
+
+ r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
+
+ /* if sleeping is allowed, wait for the page, if necessary */
+ if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
+ filemap_invalidate_lock_shared(r->file->f_mapping);
+ r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
+ NULL, r->file);
+ filemap_invalidate_unlock_shared(r->file->f_mapping);
+ }
+
+ if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
+ if (!IS_ERR(r->folio))
+ folio_put(r->folio);
+ r->folio = NULL;
+ return -EFAULT;
+ }
+
+ r->folio_off = folio_pos(r->folio);
+ r->addr = kmap_local_folio(r->folio, 0);
+
+ return 0;
+}
+
+static const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
+{
+ size_t folio_sz;
+
+ /* provided internal temporary buffer should be sized correctly */
+ if (WARN_ON(r->buf && sz > r->buf_sz)) {
+ r->err = -E2BIG;
+ return NULL;
+ }
+
+ if (unlikely(file_off + sz < file_off)) {
+ r->err = -EOVERFLOW;
+ return NULL;
+ }
+
+ /* working with memory buffer is much more straightforward */
+ if (!r->buf) {
+ if (file_off + sz > r->data_sz) {
+ r->err = -ERANGE;
+ return NULL;
+ }
+ return r->data + file_off;
+ }
+
+ /* fetch or reuse folio for given file offset */
+ r->err = freader_get_folio(r, file_off);
+ if (r->err)
+ return NULL;
+
+ /* if requested data is crossing folio boundaries, we have to copy
+ * everything into our local buffer to keep a simple linear memory
+ * access interface
+ */
+ folio_sz = folio_size(r->folio);
+ if (file_off + sz > r->folio_off + folio_sz) {
+ int part_sz = r->folio_off + folio_sz - file_off;
+
+ /* copy the part that resides in the current folio */
+ memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz);
+
+ /* fetch next folio */
+ r->err = freader_get_folio(r, r->folio_off + folio_sz);
+ if (r->err)
+ return NULL;
+
+ /* copy the rest of requested data */
+ memcpy(r->buf + part_sz, r->addr, sz - part_sz);
+
+ return r->buf;
+ }
+
+ /* if data fits in a single folio, just return direct pointer */
+ return r->addr + (file_off - r->folio_off);
+}
+
+static void freader_cleanup(struct freader *r)
+{
+ if (!r->buf)
+ return; /* non-file-backed mode */
+
+ freader_put_folio(r);
+}
+
/*
* Parse build id from the note segment. This logic can be shared between
* 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
* identical.
*/
-static int parse_build_id_buf(unsigned char *build_id,
- __u32 *size,
- const void *note_start,
- Elf32_Word note_size)
+static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size,
+ loff_t note_off, Elf32_Word note_size)
{
- Elf32_Word note_offs = 0, new_offs;
+ const char note_name[] = "GNU";
+ const size_t note_name_sz = sizeof(note_name);
+ u32 build_id_off, new_off, note_end, name_sz, desc_sz;
+ const Elf32_Nhdr *nhdr;
+ const char *data;
+
+ if (check_add_overflow(note_off, note_size, &note_end))
+ return -EINVAL;
- while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
- Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+ while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) {
+ nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz);
+ if (!nhdr)
+ return r->err;
+
+ name_sz = READ_ONCE(nhdr->n_namesz);
+ desc_sz = READ_ONCE(nhdr->n_descsz);
+
+ new_off = note_off + sizeof(Elf32_Nhdr);
+ if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
+ check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
+ new_off > note_end)
+ break;
if (nhdr->n_type == BUILD_ID &&
- nhdr->n_namesz == sizeof("GNU") &&
- !strcmp((char *)(nhdr + 1), "GNU") &&
- nhdr->n_descsz > 0 &&
- nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
- memcpy(build_id,
- note_start + note_offs +
- ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- nhdr->n_descsz);
- memset(build_id + nhdr->n_descsz, 0,
- BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+ name_sz == note_name_sz &&
+ memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
+ desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
+ build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);
+
+ /* freader_fetch() will invalidate nhdr pointer */
+ data = freader_fetch(r, build_id_off, desc_sz);
+ if (!data)
+ return r->err;
+
+ memcpy(build_id, data, desc_sz);
+ memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
if (size)
- *size = nhdr->n_descsz;
+ *size = desc_sz;
return 0;
}
- new_offs = note_offs + sizeof(Elf32_Nhdr) +
- ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
- if (new_offs <= note_offs) /* overflow */
- break;
- note_offs = new_offs;
+
+ note_off = new_off;
}
return -EINVAL;
}
-static inline int parse_build_id(const void *page_addr,
- unsigned char *build_id,
- __u32 *size,
- const void *note_start,
- Elf32_Word note_size)
+/* Parse build ID from 32-bit ELF */
+static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size)
{
- /* check for overflow */
- if (note_start < page_addr || note_start + note_size < note_start)
- return -EINVAL;
+ const Elf32_Ehdr *ehdr;
+ const Elf32_Phdr *phdr;
+ __u32 phnum, phoff, i;
- /* only supports note that fits in the first page */
- if (note_start + note_size > page_addr + PAGE_SIZE)
- return -EINVAL;
+ ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr));
+ if (!ehdr)
+ return r->err;
- return parse_build_id_buf(build_id, size, note_start, note_size);
-}
+ /* subsequent freader_fetch() calls invalidate pointers, so remember locally */
+ phnum = READ_ONCE(ehdr->e_phnum);
+ phoff = READ_ONCE(ehdr->e_phoff);
-/* Parse build ID from 32-bit ELF */
-static int get_build_id_32(const void *page_addr, unsigned char *build_id,
- __u32 *size)
-{
- Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
- Elf32_Phdr *phdr;
- int i;
-
- /*
- * FIXME
- * Neither ELF spec nor ELF loader require that program headers
- * start immediately after ELF header.
- */
- if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
- return -EINVAL;
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ /* set upper bound on amount of segments (phdrs) we iterate */
+ if (phnum > MAX_PHDR_CNT)
+ phnum = MAX_PHDR_CNT;
+
+ /* check that phoff is not large enough to cause an overflow */
+ if (phoff + phnum * sizeof(Elf32_Phdr) < phoff)
return -EINVAL;
- phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+ for (i = 0; i < phnum; ++i) {
+ phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr));
+ if (!phdr)
+ return r->err;
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !parse_build_id(page_addr, build_id, size,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
+ if (phdr->p_type == PT_NOTE &&
+ !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
+ READ_ONCE(phdr->p_filesz)))
return 0;
}
return -EINVAL;
}
/* Parse build ID from 64-bit ELF */
-static int get_build_id_64(const void *page_addr, unsigned char *build_id,
- __u32 *size)
+static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
{
- Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
- Elf64_Phdr *phdr;
- int i;
-
- /*
- * FIXME
- * Neither ELF spec nor ELF loader require that program headers
- * start immediately after ELF header.
- */
- if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
- return -EINVAL;
- /* only supports phdr that fits in one page */
- if (ehdr->e_phnum >
- (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ const Elf64_Ehdr *ehdr;
+ const Elf64_Phdr *phdr;
+ __u32 phnum, i;
+ __u64 phoff;
+
+ ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr));
+ if (!ehdr)
+ return r->err;
+
+ /* subsequent freader_fetch() calls invalidate pointers, so remember locally */
+ phnum = READ_ONCE(ehdr->e_phnum);
+ phoff = READ_ONCE(ehdr->e_phoff);
+
+ /* set upper bound on amount of segments (phdrs) we iterate */
+ if (phnum > MAX_PHDR_CNT)
+ phnum = MAX_PHDR_CNT;
+
+ /* check that phoff is not large enough to cause an overflow */
+ if (phoff + phnum * sizeof(Elf64_Phdr) < phoff)
return -EINVAL;
- phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+ for (i = 0; i < phnum; ++i) {
+ phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr));
+ if (!phdr)
+ return r->err;
- for (i = 0; i < ehdr->e_phnum; ++i) {
- if (phdr[i].p_type == PT_NOTE &&
- !parse_build_id(page_addr, build_id, size,
- page_addr + phdr[i].p_offset,
- phdr[i].p_filesz))
+ if (phdr->p_type == PT_NOTE &&
+ !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
+ READ_ONCE(phdr->p_filesz)))
return 0;
}
+
return -EINVAL;
}
-/*
- * Parse build ID of ELF file mapped to vma
- * @vma: vma object
- * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
- * @size: returns actual build id size in case of success
- *
- * Return: 0 on success, -EINVAL otherwise
- */
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size)
+/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
+#define MAX_FREADER_BUF_SZ 64
+
+static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size, bool may_fault)
{
- Elf32_Ehdr *ehdr;
- struct page *page;
- void *page_addr;
+ const Elf32_Ehdr *ehdr;
+ struct freader r;
+ char buf[MAX_FREADER_BUF_SZ];
int ret;
/* only works for page backed storage */
if (!vma->vm_file)
return -EINVAL;
- page = find_get_page(vma->vm_file->f_mapping, 0);
- if (!page)
- return -EFAULT; /* page not mapped */
+ freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);
+
+ /* fetch first 18 bytes of ELF header for checks */
+ ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
+ if (!ehdr) {
+ ret = r.err;
+ goto out;
+ }
ret = -EINVAL;
- page_addr = kmap_local_page(page);
- ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
@@ -166,15 +314,46 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
goto out;
if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
- ret = get_build_id_32(page_addr, build_id, size);
+ ret = get_build_id_32(&r, build_id, size);
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
- ret = get_build_id_64(page_addr, build_id, size);
+ ret = get_build_id_64(&r, build_id, size);
out:
- kunmap_local(page_addr);
- put_page(page);
+ freader_cleanup(&r);
return ret;
}
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes no page fault can be taken, so if relevant portions of ELF file are
+ * not already paged in, fetching of build ID fails.
+ *
+ * Return: 0 on success; negative error, otherwise
+ */
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+{
+ return __build_id_parse(vma, build_id, size, false /* !may_fault */);
+}
+
+/*
+ * Parse build ID of ELF file mapped to VMA
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes faultable context and can cause page faults to bring in file data
+ * into page cache.
+ *
+ * Return: 0 on success; negative error, otherwise
+ */
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+{
+ return __build_id_parse(vma, build_id, size, true /* may_fault */);
+}
+
/**
* build_id_parse_buf - Get build ID from a buffer
* @buf: ELF note section(s) to parse
@@ -185,7 +364,15 @@ out:
*/
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
{
- return parse_build_id_buf(build_id, NULL, buf, buf_size);
+ struct freader r;
+ int err;
+
+ freader_init_from_mem(&r, buf, buf_size);
+
+ err = parse_build_id(&r, build_id, NULL, 0, buf_size);
+
+ freader_cleanup(&r);
+ return err;
}
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
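
For reference, the note layout these parsers walk: each PT_NOTE entry is an Elf32_Nhdr followed by a 4-byte-aligned name and descriptor, and the GNU build ID is the note named "GNU" with type NT_GNU_BUILD_ID (the BUILD_ID constant, 3, defined above). A simplified walk over a plain memory buffer, mirroring the overflow-checked loop in parse_build_id() (a sketch; the kernel version reads through the folio-backed freader instead):

    #include <elf.h>
    #include <stdint.h>
    #include <string.h>

    /* Returns build ID length copied into out[] (>= 20 bytes), or 0 if absent. */
    static size_t find_gnu_build_id(const uint8_t *buf, size_t sz, uint8_t *out)
    {
            size_t off = 0;

            while (off + sizeof(Elf32_Nhdr) <= sz) {
                    const Elf32_Nhdr *nhdr = (const void *)(buf + off);
                    size_t name_off = off + sizeof(*nhdr);
                    size_t desc_off = name_off + ((nhdr->n_namesz + 3) & ~3u);
                    size_t next_off = desc_off + ((nhdr->n_descsz + 3) & ~3u);

                    if (next_off < off || next_off > sz)  /* overflow/truncated */
                            break;
                    if (nhdr->n_type == 3 /* NT_GNU_BUILD_ID */ &&
                        nhdr->n_namesz == 4 &&
                        !memcmp(buf + name_off, "GNU", 4) &&
                        nhdr->n_descsz > 0 && nhdr->n_descsz <= 20) {
                            memcpy(out, buf + desc_off, nhdr->n_descsz);
                            return nhdr->n_descsz;
                    }
                    off = next_off;
            }
            return 0;
    }
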
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 4e4d081a1d3b..be04aa42125c 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -468,12 +468,9 @@ static __wsum to_wsum(u32 x)
static void assert_setup_correct(struct kunit *test)
{
- CHECK_EQ(sizeof(random_buf) / sizeof(random_buf[0]), MAX_LEN);
- CHECK_EQ(sizeof(expected_results) / sizeof(expected_results[0]),
- MAX_LEN);
- CHECK_EQ(sizeof(init_sums_no_overflow) /
- sizeof(init_sums_no_overflow[0]),
- MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(random_buf), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(expected_results), MAX_LEN);
+ CHECK_EQ(ARRAY_SIZE(init_sums_no_overflow), MAX_LEN);
}
/*
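
The ARRAY_SIZE() conversion above is the idiomatic replacement for the open-coded sizeof division. In simplified form the macro is just the division below; the in-kernel definition (include/linux/array_size.h) additionally rejects pointer arguments at compile time via __must_be_array():

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
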
diff --git a/lib/closure.c b/lib/closure.c
index 116afae2eed9..2bfe7d2a0048 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -278,7 +278,7 @@ static int debug_show(struct seq_file *f, void *data)
seq_printf(f, " W %pS\n",
(void *) cl->waiting_on);
- seq_puts(f, "\n");
+ seq_putc(f, '\n');
}
spin_unlock_irq(&closure_list_lock);
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 842894158944..32138bb8ef77 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
/*
@@ -103,12 +102,11 @@
#ifdef STATIC
# define XZ_PREBOOT
#else
-#include <linux/decompress/unxz.h>
+# include <linux/decompress/unxz.h>
#endif
#ifdef __KERNEL__
# include <linux/decompress/mm.h>
#endif
-#define XZ_EXTERN STATIC
#ifndef XZ_PREBOOT
# include <linux/slab.h>
@@ -127,11 +125,21 @@
#ifdef CONFIG_X86
# define XZ_DEC_X86
#endif
-#ifdef CONFIG_PPC
+#if defined(CONFIG_PPC) && defined(CONFIG_CPU_BIG_ENDIAN)
# define XZ_DEC_POWERPC
#endif
#ifdef CONFIG_ARM
-# define XZ_DEC_ARM
+# ifdef CONFIG_THUMB2_KERNEL
+# define XZ_DEC_ARMTHUMB
+# else
+# define XZ_DEC_ARM
+# endif
+#endif
+#ifdef CONFIG_ARM64
+# define XZ_DEC_ARM64
+#endif
+#ifdef CONFIG_RISCV
+# define XZ_DEC_RISCV
#endif
#ifdef CONFIG_SPARC
# define XZ_DEC_SPARC
@@ -220,7 +228,7 @@ void *memmove(void *dest, const void *src, size_t size)
#endif
/*
- * Since we need memmove anyway, would use it as memcpy too.
+ * Since we need memmove anyway, we could use it as memcpy too.
* Commented out for now to avoid breaking things.
*/
/*
@@ -390,17 +398,17 @@ error_alloc_state:
}
/*
- * This macro is used by architecture-specific files to decompress
+ * This function is used by architecture-specific files to decompress
* the kernel image.
*/
#ifdef XZ_PREBOOT
-STATIC int INIT __decompress(unsigned char *buf, long len,
- long (*fill)(void*, unsigned long),
- long (*flush)(void*, unsigned long),
- unsigned char *out_buf, long olen,
- long *pos,
- void (*error)(char *x))
+STATIC int INIT __decompress(unsigned char *in, long in_size,
+ long (*fill)(void *dest, unsigned long size),
+ long (*flush)(void *src, unsigned long size),
+ unsigned char *out, long out_size,
+ long *in_used,
+ void (*error)(char *x))
{
- return unxz(buf, len, fill, flush, out_buf, pos, error);
+ return unxz(in, in_size, fill, flush, out, in_used, error);
}
#endif
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
index c4cc4026c451..5b9bfaac7ac1 100644
--- a/lib/dim/Makefile
+++ b/lib/dim/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_DIMLIB) += dimlib.o
-dimlib-objs := dim.o net_dim.o rdma_dim.o
+dimlib-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 1a996fbbf50a..388da1aea14a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -73,6 +73,7 @@ void dump_stack_print_info(const char *log_lvl)
print_worker_info(log_lvl, current);
print_stop_info(log_lvl, current);
+ print_scx_info(log_lvl, current);
}
/**
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index f2c5e7910bb1..5a007952f7f2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -1147,7 +1147,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
iter->table->mod_name, dp->function,
ddebug_describe_flags(dp->flags, &flags));
seq_escape_str(m, dp->format, ESCAPE_SPACE, "\t\r\n\"");
- seq_puts(m, "\"");
+ seq_putc(m, '"');
if (dp->class_id != _DPRINTK_CLASS_DFLT) {
class = ddebug_class_name(iter, dp);
@@ -1156,7 +1156,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
else
seq_printf(m, " class unknown, _id:%d", dp->class_id);
}
- seq_puts(m, "\n");
+ seq_putc(m, '\n');
return 0;
}
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index d608f9b48c10..52eb6ba29698 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -2,6 +2,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
+#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index f9ad60a9c7bd..ecb638d4cde1 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -306,8 +306,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
orig = kvmalloc(prev_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker(((expected_pages) * PAGE_SIZE) * 2, \
- kvrealloc(orig, prev_size, \
- ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
kvfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index fa692c86f069..79e067b51488 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -5,99 +5,31 @@
#include <linux/gfp.h>
#include <linux/kmemleak.h>
-#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
-#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
-
-struct genradix_node {
- union {
- /* Interior node: */
- struct genradix_node *children[GENRADIX_ARY];
-
- /* Leaf: */
- u8 data[GENRADIX_NODE_SIZE];
- };
-};
-
-static inline int genradix_depth_shift(unsigned depth)
-{
- return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
-}
-
-/*
- * Returns size (of data, in bytes) that a tree of a given depth holds:
- */
-static inline size_t genradix_depth_size(unsigned depth)
-{
- return 1UL << genradix_depth_shift(depth);
-}
-
-/* depth that's needed for a genradix that can address up to ULONG_MAX: */
-#define GENRADIX_MAX_DEPTH \
- DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
-
-#define GENRADIX_DEPTH_MASK \
- ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
-
-static inline unsigned genradix_root_to_depth(struct genradix_root *r)
-{
- return (unsigned long) r & GENRADIX_DEPTH_MASK;
-}
-
-static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
-{
- return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
-}
-
/*
* Returns pointer to the specified byte @offset within @radix, or NULL if not
* allocated
*/
void *__genradix_ptr(struct __genradix *radix, size_t offset)
{
- struct genradix_root *r = READ_ONCE(radix->root);
- struct genradix_node *n = genradix_root_to_node(r);
- unsigned level = genradix_root_to_depth(r);
-
- if (ilog2(offset) >= genradix_depth_shift(level))
- return NULL;
-
- while (1) {
- if (!n)
- return NULL;
- if (!level)
- break;
-
- level--;
-
- n = n->children[offset >> genradix_depth_shift(level)];
- offset &= genradix_depth_size(level) - 1;
- }
-
- return &n->data[offset];
+ return __genradix_ptr_inlined(radix, offset);
}
EXPORT_SYMBOL(__genradix_ptr);
-static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
-{
- return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
-}
-
-static inline void genradix_free_node(struct genradix_node *node)
-{
- kfree(node);
-}
-
/*
* Returns pointer to the specified byte @offset within @radix, allocating it if
* necessary - newly allocated slots are always zeroed out:
*/
void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ struct genradix_node **preallocated,
gfp_t gfp_mask)
{
struct genradix_root *v = READ_ONCE(radix->root);
struct genradix_node *n, *new_node = NULL;
unsigned level;
+ if (preallocated)
+ swap(new_node, *preallocated);
+
/* Increase tree depth if necessary: */
while (1) {
struct genradix_root *r = v, *new_root;
@@ -281,7 +213,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size,
size_t offset;
for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE)
- if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
+ if (!__genradix_ptr_alloc(radix, offset, NULL, gfp_mask))
return -ENOMEM;
return 0;
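
The new preallocated argument to __genradix_ptr_alloc() lets callers allocate a node up front in a sleepable context and then perform the tree update under a stricter gfp mask. An illustrative caller pattern (a sketch only: it assumes genradix_alloc_node(), which this patch moves out of this file, stays reachable from the header, that leftover preallocated nodes are freed by the tree code, and that radix and offset are the caller's own):

    /* Preallocate while sleeping is still allowed... */
    struct genradix_node *node = genradix_alloc_node(GFP_KERNEL);

    if (!node)
            return -ENOMEM;
    /* ...then insert where a blocking allocation is not an option. */
    if (!__genradix_ptr_alloc(radix, offset, &node, GFP_NOWAIT))
            return -ENOMEM;
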
diff --git a/lib/glob.c b/lib/glob.c
index 15b73f490720..aa57900d2062 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -68,6 +68,8 @@ bool __pure glob_match(char const *pat, char const *str)
back_str = --str; /* Allow zero-length match */
break;
case '[': { /* Character class */
+ if (c == '\0') /* No possible match */
+ return false;
bool match = false, inverted = (*pat == '!');
char const *class = pat + inverted;
unsigned char a = *class++;
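
The added check covers the case where the string is exhausted just as the pattern reaches a character class: c is the current string character, so with c == '\0' there is nothing left for the class to match, and glob_match() must fail rather than compare the class against the terminator. Expected behaviour, for illustration (glob_match() is declared in include/linux/glob.h):

    static void glob_class_example(void)
    {
            /* "a[bc]" matches "ab" but not "a": the class must consume a char. */
            BUG_ON(!glob_match("a[bc]", "ab"));
            BUG_ON(glob_match("a[bc]", "a"));   /* string ends at the class */
    }
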
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 30f6bbf04a4a..5aa51978e456 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -9,7 +9,8 @@ kunit-objs += test.o \
try-catch.o \
executor.o \
attributes.o \
- device.o
+ device.o \
+ platform.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
@@ -19,6 +20,7 @@ endif
obj-y += hooks.o
obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
+obj-$(CONFIG_KUNIT_TEST) += platform-test.o
# string-stream-test compiles built-in only.
ifeq ($(CONFIG_KUNIT_TEST),y)
diff --git a/lib/kunit/platform-test.c b/lib/kunit/platform-test.c
new file mode 100644
index 000000000000..e3debb8fbcef
--- /dev/null
+++ b/lib/kunit/platform-test.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for KUnit platform driver infrastructure.
+ */
+
+#include <linux/platform_device.h>
+
+#include <kunit/platform_device.h>
+#include <kunit/test.h>
+
+/*
+ * Test that kunit_platform_device_alloc() creates a platform device.
+ */
+static void kunit_platform_device_alloc_test(struct kunit *test)
+{
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ kunit_platform_device_alloc(test, "kunit-platform", 1));
+}
+
+/*
+ * Test that kunit_platform_device_add() registers a platform device on the
+ * platform bus with the proper name and id.
+ */
+static void kunit_platform_device_add_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-add";
+ const int id = -1;
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ KUNIT_EXPECT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ KUNIT_EXPECT_TRUE(test, dev_is_platform(&pdev->dev));
+ KUNIT_EXPECT_STREQ(test, pdev->name, name);
+ KUNIT_EXPECT_EQ(test, pdev->id, id);
+}
+
+/*
+ * Test that kunit_platform_device_add() called twice with the same device name
+ * and id fails the second time and properly cleans up.
+ */
+static void kunit_platform_device_add_twice_fails_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-add-2";
+ const int id = -1;
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ pdev = kunit_platform_device_alloc(test, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ KUNIT_EXPECT_NE(test, 0, kunit_platform_device_add(test, pdev));
+}
+
+static int kunit_platform_device_find_by_name(struct device *dev, const void *data)
+{
+ return strcmp(dev_name(dev), data) == 0;
+}
+
+/*
+ * Test that kunit_platform_device_add() cleans up by removing the platform
+ * device when the test finishes.
+ */
+static void kunit_platform_device_add_cleans_up(struct kunit *test)
+{
+ struct platform_device *pdev;
+ const char *name = "kunit-platform-clean";
+ const int id = -1;
+ struct kunit fake;
+ struct device *dev;
+
+ kunit_init_test(&fake, "kunit_platform_device_add_fake_test", NULL);
+ KUNIT_ASSERT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ pdev = kunit_platform_device_alloc(&fake, name, id);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(&fake, pdev));
+ dev = bus_find_device(&platform_bus_type, NULL, name,
+ kunit_platform_device_find_by_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ put_device(dev);
+
+ /* Remove pdev */
+ kunit_cleanup(&fake);
+
+ /*
+ * Failing to migrate the kunit_resource would lead to an extra
+ * put_device() call on the platform device. The best we can do here is
+ * make sure the device no longer exists on the bus, but if something
+ * is wrong we'll see a refcount underflow here. We can't test for a
+ * refcount underflow because the kref matches the lifetime of the
+ * device which should already be freed and could be used by something
+ * else.
+ */
+ dev = bus_find_device(&platform_bus_type, NULL, name,
+ kunit_platform_device_find_by_name);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, dev);
+ put_device(dev);
+}
+
+/*
+ * Test suite for struct platform_device kunit APIs
+ */
+static struct kunit_case kunit_platform_device_test_cases[] = {
+ KUNIT_CASE(kunit_platform_device_alloc_test),
+ KUNIT_CASE(kunit_platform_device_add_test),
+ KUNIT_CASE(kunit_platform_device_add_twice_fails_test),
+ KUNIT_CASE(kunit_platform_device_add_cleans_up),
+ {}
+};
+
+static struct kunit_suite kunit_platform_device_suite = {
+ .name = "kunit_platform_device",
+ .test_cases = kunit_platform_device_test_cases,
+};
+
+struct kunit_platform_driver_test_context {
+ struct platform_driver pdrv;
+ const char *data;
+};
+
+static const char * const test_data = "test data";
+
+static inline struct kunit_platform_driver_test_context *
+to_test_context(struct platform_device *pdev)
+{
+ return container_of(to_platform_driver(pdev->dev.driver),
+ struct kunit_platform_driver_test_context,
+ pdrv);
+}
+
+static int kunit_platform_driver_probe(struct platform_device *pdev)
+{
+ struct kunit_platform_driver_test_context *ctx;
+
+ ctx = to_test_context(pdev);
+ ctx->data = test_data;
+
+ return 0;
+}
+
+/* Test that kunit_platform_driver_register() registers a driver that probes. */
+static void kunit_platform_driver_register_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct kunit_platform_driver_test_context *ctx;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ const char *name = "kunit-platform-register";
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ pdev = kunit_platform_device_alloc(test, name, -1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ ctx->pdrv.probe = kunit_platform_driver_probe;
+ ctx->pdrv.driver.name = name;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+
+ KUNIT_EXPECT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+ KUNIT_EXPECT_STREQ(test, ctx->data, test_data);
+}
+
+/*
+ * Test that kunit_platform_device_prepare_wait_for_probe() completes the completion
+ * when the device is already probed.
+ */
+static void kunit_platform_device_prepare_wait_for_probe_completes_when_already_probed(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct kunit_platform_driver_test_context *ctx;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ const char *name = "kunit-platform-wait";
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ pdev = kunit_platform_device_alloc(test, name, -1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+
+ ctx->pdrv.probe = kunit_platform_driver_probe;
+ ctx->pdrv.driver.name = name;
+ ctx->pdrv.driver.owner = THIS_MODULE;
+
+ /* Make sure driver has actually probed */
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ KUNIT_ASSERT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+
+ reinit_completion(&comp);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+
+ KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, HZ));
+}
+
+static struct kunit_case kunit_platform_driver_test_cases[] = {
+ KUNIT_CASE(kunit_platform_driver_register_test),
+ KUNIT_CASE(kunit_platform_device_prepare_wait_for_probe_completes_when_already_probed),
+ {}
+};
+
+/*
+ * Test suite for struct platform_driver kunit APIs
+ */
+static struct kunit_suite kunit_platform_driver_suite = {
+ .name = "kunit_platform_driver",
+ .test_cases = kunit_platform_driver_test_cases,
+};
+
+kunit_test_suites(
+ &kunit_platform_device_suite,
+ &kunit_platform_driver_suite,
+);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit test for KUnit platform driver infrastructure");
diff --git a/lib/kunit/platform.c b/lib/kunit/platform.c
new file mode 100644
index 000000000000..0b518de26065
--- /dev/null
+++ b/lib/kunit/platform.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test managed platform driver
+ */
+
+#include <linux/completion.h>
+#include <linux/device/bus.h>
+#include <linux/device/driver.h>
+#include <linux/platform_device.h>
+
+#include <kunit/platform_device.h>
+#include <kunit/resource.h>
+
+struct kunit_platform_device_alloc_params {
+ const char *name;
+ int id;
+};
+
+static int kunit_platform_device_alloc_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_platform_device_alloc_params *params = context;
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(params->name, params->id);
+ if (!pdev)
+ return -ENOMEM;
+
+ res->data = pdev;
+
+ return 0;
+}
+
+static void kunit_platform_device_alloc_exit(struct kunit_resource *res)
+{
+ struct platform_device *pdev = res->data;
+
+ platform_device_put(pdev);
+}
+
+/**
+ * kunit_platform_device_alloc() - Allocate a KUnit test managed platform device
+ * @test: test context
+ * @name: device name of platform device to alloc
+ * @id: identifier of platform device to alloc.
+ *
+ * Allocate a test managed platform device. The device is put when the test completes.
+ *
+ * Return: Allocated platform device on success, NULL on failure.
+ */
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id)
+{
+ struct kunit_platform_device_alloc_params params = {
+ .name = name,
+ .id = id,
+ };
+
+ return kunit_alloc_resource(test,
+ kunit_platform_device_alloc_init,
+ kunit_platform_device_alloc_exit,
+ GFP_KERNEL, &params);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_alloc);
+
+static void kunit_platform_device_add_exit(struct kunit_resource *res)
+{
+ struct platform_device *pdev = res->data;
+
+ platform_device_unregister(pdev);
+}
+
+static bool
+kunit_platform_device_alloc_match(struct kunit *test,
+ struct kunit_resource *res, void *match_data)
+{
+ struct platform_device *pdev = match_data;
+
+ return res->data == pdev && res->free == kunit_platform_device_alloc_exit;
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(platform_device_unregister_wrapper,
+ platform_device_unregister, struct platform_device *);
+/**
+ * kunit_platform_device_add() - Register a KUnit test managed platform device
+ * @test: test context
+ * @pdev: platform device to add
+ *
+ * Register a test managed platform device. The device is unregistered when the
+ * test completes.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ return ret;
+
+ res = kunit_find_resource(test, kunit_platform_device_alloc_match, pdev);
+ if (res) {
+ /*
+ * Transfer the reference count of the platform device if it
+ * was allocated with kunit_platform_device_alloc(). In this
+ * case, calling platform_device_put() when the test exits from
+ * kunit_platform_device_alloc_exit() would lead to reference
+ * count underflow because platform_device_unregister_wrapper()
+ * calls platform_device_unregister() which also calls
+ * platform_device_put().
+ *
+ * Usually callers transfer the refcount initialized in
+ * platform_device_alloc() to platform_device_add() by calling
+ * platform_device_unregister() when platform_device_add()
+ * succeeds or platform_device_put() when it fails. KUnit has to
+ * keep this straight by redirecting the free routine for the
+ * resource to the right function. Luckily this only has to
+ * account for the success scenario.
+ */
+ res->free = kunit_platform_device_add_exit;
+ kunit_put_resource(res);
+ } else {
+ ret = kunit_add_action_or_reset(test, platform_device_unregister_wrapper, pdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_add);
+
+struct kunit_platform_device_probe_nb {
+ struct completion *x;
+ struct device *dev;
+ struct notifier_block nb;
+};
+
+static int kunit_platform_device_probe_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct kunit_platform_device_probe_nb *knb;
+ struct device *dev = data;
+
+ knb = container_of(nb, struct kunit_platform_device_probe_nb, nb);
+ if (event != BUS_NOTIFY_BOUND_DRIVER || knb->dev != dev)
+ return NOTIFY_DONE;
+
+ complete(knb->x);
+
+ return NOTIFY_OK;
+}
+
+static void kunit_platform_device_probe_nb_remove(void *nb)
+{
+ bus_unregister_notifier(&platform_bus_type, nb);
+}
+
+/**
+ * kunit_platform_device_prepare_wait_for_probe() - Prepare a completion
+ * variable to wait for a platform device to probe
+ * @test: test context
+ * @pdev: platform device to prepare to wait for probe of
+ * @x: completion variable completed when @pdev has probed
+ *
+ * Prepare a completion variable @x to wait for @pdev to probe. Waiting on the
+ * completion forces a preemption, allowing the platform driver to probe.
+ *
+ * Example
+ *
+ * .. code-block:: c
+ *
+ * static int kunit_platform_driver_probe(struct platform_device *pdev)
+ * {
+ * return 0;
+ * }
+ *
+ * static void kunit_platform_driver_test(struct kunit *test)
+ * {
+ * struct platform_device *pdev;
+ * struct platform_driver *pdrv;
+ * DECLARE_COMPLETION_ONSTACK(comp);
+ *
+ * pdev = kunit_platform_device_alloc(test, "kunit-platform", -1);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ *
+ * pdrv = kunit_kzalloc(test, sizeof(*pdrv), GFP_KERNEL);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdrv);
+ *
+ * pdrv->probe = kunit_platform_driver_probe;
+ * pdrv->driver.name = "kunit-platform";
+ * pdrv->driver.owner = THIS_MODULE;
+ *
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_prepare_wait_for_probe(test, pdev, &comp));
+ * KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, pdrv));
+ *
+ * KUNIT_EXPECT_NE(test, 0, wait_for_completion_timeout(&comp, 3 * HZ));
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x)
+{
+ struct device *dev = &pdev->dev;
+ struct kunit_platform_device_probe_nb *knb;
+ bool bound;
+
+ knb = kunit_kzalloc(test, sizeof(*knb), GFP_KERNEL);
+ if (!knb)
+ return -ENOMEM;
+
+ knb->nb.notifier_call = kunit_platform_device_probe_notify;
+ knb->dev = dev;
+ knb->x = x;
+
+ device_lock(dev);
+ bound = device_is_bound(dev);
+ if (bound) {
+ device_unlock(dev);
+ complete(x);
+ kunit_kfree(test, knb);
+ return 0;
+ }
+
+ bus_register_notifier(&platform_bus_type, &knb->nb);
+ device_unlock(&pdev->dev);
+
+ return kunit_add_action_or_reset(test, kunit_platform_device_probe_nb_remove, &knb->nb);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_device_prepare_wait_for_probe);
+
+KUNIT_DEFINE_ACTION_WRAPPER(platform_driver_unregister_wrapper,
+ platform_driver_unregister, struct platform_driver *);
+/**
+ * kunit_platform_driver_register() - Register a KUnit test managed platform driver
+ * @test: test context
+ * @drv: platform driver to register
+ *
+ * Register a test managed platform driver. This allows callers to embed the
+ * @drv in a container structure and use container_of() in the probe function
+ * to pass information to KUnit tests.
+ *
+ * Example
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_test_context {
+ * struct platform_driver pdrv;
+ * const char *data;
+ * };
+ *
+ * static inline struct kunit_test_context *
+ * to_test_context(struct platform_device *pdev)
+ * {
+ * return container_of(to_platform_driver(pdev->dev.driver),
+ * struct kunit_test_context,
+ * pdrv);
+ * }
+ *
+ * static int kunit_platform_driver_probe(struct platform_device *pdev)
+ * {
+ * struct kunit_test_context *ctx;
+ *
+ * ctx = to_test_context(pdev);
+ * ctx->data = "test data";
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_platform_driver_test(struct kunit *test)
+ * {
+ * struct kunit_test_context *ctx;
+ *
+ * ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ *
+ * ctx->pdrv.probe = kunit_platform_driver_probe;
+ * ctx->pdrv.driver.name = "kunit-platform";
+ * ctx->pdrv.driver.owner = THIS_MODULE;
+ *
+ * KUNIT_EXPECT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
+ * <... wait for driver to probe ...>
+ * KUNIT_EXPECT_STREQ(test, ctx->data, "test data");
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, platform_driver_unregister_wrapper, drv);
+}
+EXPORT_SYMBOL_GPL(kunit_platform_driver_register);
diff --git a/lib/list-test.c b/lib/list-test.c
index 37cbc33e9fdb..e207c4c98d70 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -102,6 +102,8 @@ static void list_test_list_replace(struct kunit *test)
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
}
static void list_test_list_replace_init(struct kunit *test)
@@ -118,6 +120,8 @@ static void list_test_list_replace_init(struct kunit *test)
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, a_new.prev, &list);
/* check a_old is empty (initialized) */
KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index b3d9187611de..9e0d469c7658 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -243,7 +243,7 @@ static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
BUG_ON(!lc);
BUG_ON(!lc->nr_elements);
- hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
+ hlist_for_each_entry(e, lc_hash_slot(lc, enr), collision) {
/* "about to be changed" elements, pending transaction commit,
* are hashed by their "new number". "Normal" elements have
* lc_number == lc_new_number. */
@@ -303,7 +303,7 @@ void lc_del(struct lru_cache *lc, struct lc_element *e)
BUG_ON(e->refcnt);
e->lc_number = e->lc_new_number = LC_FREE;
- hlist_del_init(&e->colision);
+ hlist_del_init(&e->collision);
list_move(&e->list, &lc->free);
RETURN();
}
@@ -324,9 +324,9 @@ static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned n
PARANOIA_LC_ELEMENT(lc, e);
e->lc_new_number = new_number;
- if (!hlist_unhashed(&e->colision))
- __hlist_del(&e->colision);
- hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
+ if (!hlist_unhashed(&e->collision))
+ __hlist_del(&e->collision);
+ hlist_add_head(&e->collision, lc_hash_slot(lc, new_number));
list_move(&e->list, &lc->to_be_changed);
return e;
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index e7ac8694b797..bc45594ad2a8 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -621,6 +621,7 @@ void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
LZ4_streamHCPtr->internal_donotuse.base = NULL;
LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
}
+EXPORT_SYMBOL(LZ4_resetStreamHC);
int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
const char *dictionary,
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 6df3a8b95808..20990ecba2dd 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -348,17 +348,17 @@ static inline void *mte_safe_root(const struct maple_enode *node)
return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}
-static inline void *mte_set_full(const struct maple_enode *node)
+static inline void __maybe_unused *mte_set_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}
-static inline void *mte_clear_full(const struct maple_enode *node)
+static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}
-static inline bool mte_has_null(const struct maple_enode *node)
+static inline bool __maybe_unused mte_has_null(const struct maple_enode *node)
{
return (unsigned long)node & MAPLE_ENODE_NULL;
}
@@ -474,6 +474,7 @@ enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
/*
* mas_set_parent() - Set the parent node and encode the slot
+ * @mas: The maple state
* @enode: The encoded maple node.
* @parent: The encoded maple node that is the parent of @enode.
* @slot: The slot that @enode resides in @parent.
@@ -534,7 +535,7 @@ unsigned int mte_parent_slot(const struct maple_enode *enode)
/*
* mte_parent() - Get the parent of @node.
- * @node: The encoded maple node.
+ * @enode: The encoded maple node.
*
* Return: The parent maple node.
*/
@@ -641,8 +642,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
/*
* ma_pivots() - Get a pointer to the maple node pivots.
- * @node - the maple node
- * @type - the node type
+ * @node: the maple node
+ * @type: the node type
*
* In the event of a dead node, this array may be %NULL
*
@@ -665,8 +666,8 @@ static inline unsigned long *ma_pivots(struct maple_node *node,
/*
* ma_gaps() - Get a pointer to the maple node gaps.
- * @node - the maple node
- * @type - the node type
+ * @node: the maple node
+ * @type: the node type
*
* Return: A pointer to the maple node gaps
*/
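
The maple_tree changes in the hunks above and below are mostly of one shape: kernel-doc requires "@name:" with a colon rather than "@name -", and the documented name must match the actual parameter, or scripts/kernel-doc warns. The canonical layout, for reference:

    /*
     * some_func() - One-line summary.
     * @node: the maple node     (colon; name matches the parameter)
     * @type: the node type
     *
     * Return: What the function returns.
     */
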
@@ -880,8 +881,6 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
* @mt: The maple tree
* @mn: The maple node
* @type: The maple node type
- * @offset: The offset of the highest sub-gap in this node.
- * @end: The end of the data in this node.
*/
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
enum maple_type type)
@@ -939,7 +938,7 @@ static inline unsigned char ma_meta_gap(struct maple_node *mn)
/*
* ma_set_meta_gap() - Set the largest gap location in a nodes metadata
* @mn: The maple node
- * @mn: The maple node type
+ * @mt: The maple node type
* @offset: The location of the largest gap.
*/
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
@@ -953,8 +952,8 @@ static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
/*
* mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
- * @mat - the ma_topiary, a linked list of dead nodes.
- * @dead_enode - the node to be marked as dead and added to the tail of the list
+ * @mat: the ma_topiary, a linked list of dead nodes.
+ * @dead_enode: the node to be marked as dead and added to the tail of the list
*
* Add the @dead_enode to the linked list in @mat.
*/
@@ -977,8 +976,8 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free);
/*
* mas_mat_destroy() - Free all nodes and subtrees in a dead list.
- * @mas - the maple state
- * @mat - the ma_topiary linked list of dead nodes to free.
+ * @mas: the maple state
+ * @mat: the ma_topiary linked list of dead nodes to free.
*
* Destroy walk a dead list.
*/
@@ -999,7 +998,7 @@ static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
}
/*
* mas_descend() - Descend into the slot stored in the ma_state.
- * @mas - the maple state.
+ * @mas: the maple state.
*
* Note: Not RCU safe, only use in write side or debug code.
*/
@@ -1346,8 +1345,8 @@ static void mas_node_count(struct ma_state *mas, int count)
* Return:
* - If mas->node is an error or not mas_start, return NULL.
* - If it's an empty tree: NULL & mas->status == ma_none
- * - If it's a single entry: The entry & mas->status == mas_root
- * - If it's a tree: NULL & mas->status == safe root node.
+ * - If it's a single entry: The entry & mas->status == ma_root
+ * - If it's a tree: NULL & mas->status == ma_active
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
@@ -1372,9 +1371,9 @@ retry:
return NULL;
}
+ mas->node = NULL;
/* empty tree */
if (unlikely(!root)) {
- mas->node = NULL;
mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
@@ -1462,7 +1461,7 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
/*
* mas_leaf_max_gap() - Returns the largest gap in a leaf node
- * @mas - the maple state
+ * @mas: the maple state
*
* Return: The maximum gap in the leaf.
*/
@@ -1544,7 +1543,7 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas)
* @node: The maple node
* @gaps: The pointer to the gaps
* @mt: The maple node type
- * @*off: Pointer to store the offset location of the gap.
+ * @off: Pointer to store the offset location of the gap.
*
* Uses the metadata data end to scan backwards across set gaps.
*
@@ -1651,7 +1650,7 @@ ascend:
/*
* mas_update_gap() - Update a nodes gaps and propagate up if necessary.
- * @mas - the maple state.
+ * @mas: the maple state.
*/
static inline void mas_update_gap(struct ma_state *mas)
{
@@ -1678,8 +1677,8 @@ static inline void mas_update_gap(struct ma_state *mas)
/*
* mas_adopt_children() - Set the parent pointer of all nodes in @parent to
* @parent with the slot encoded.
- * @mas - the maple state (for the tree)
- * @parent - the maple encoded node containing the children.
+ * @mas: the maple state (for the tree)
+ * @parent: the maple encoded node containing the children.
*/
static inline void mas_adopt_children(struct ma_state *mas,
struct maple_enode *parent)
@@ -1701,8 +1700,8 @@ static inline void mas_adopt_children(struct ma_state *mas,
/*
* mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
* node as dead.
- * @mas - the maple state with the new node
- * @old_enode - The old maple encoded node to replace.
+ * @mas: the maple state with the new node
+ * @old_enode: The old maple encoded node to replace.
*/
static inline void mas_put_in_tree(struct ma_state *mas,
struct maple_enode *old_enode)
@@ -1730,8 +1729,8 @@ static inline void mas_put_in_tree(struct ma_state *mas,
* mas_replace_node() - Replace a node by putting it in the tree, marking it
* dead, and freeing it.
* Uses the parent encoding to locate the maple node in the tree.
- * @mas - the ma_state with @mas->node pointing to the new node.
- * @old_enode - The old maple encoded node.
+ * @mas: the ma_state with @mas->node pointing to the new node.
+ * @old_enode: The old maple encoded node.
*/
static inline void mas_replace_node(struct ma_state *mas,
struct maple_enode *old_enode)
@@ -1796,7 +1795,6 @@ static inline void mab_shift_right(struct maple_big_node *b_node,
/*
* mab_middle_node() - Check if a middle node is needed (unlikely)
* @b_node: the maple_big_node that contains the data.
- * @size: the amount of data in the b_node
* @split: the potential split location
* @slot_count: the size that can be stored in a single node being considered.
*
@@ -1844,6 +1842,7 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
/*
* mab_calc_split() - Calculate the split location and if there needs to be two
* splits.
+ * @mas: The maple state
* @bn: The maple_big_node with the data
* @mid_split: The second split, if required. 0 otherwise.
*
@@ -2177,7 +2176,8 @@ static inline bool mas_next_sibling(struct ma_state *mas)
}
/*
- * mte_node_or_none() - Set the enode and state.
+ * mas_node_or_none() - Set the enode and state.
+ * @mas: the maple state
* @enode: The encoded maple node.
*
* Set the node to the enode and the status.
@@ -2228,7 +2228,6 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
/*
* mast_rebalance_next() - Rebalance against the next node
* @mast: The maple subtree state
- * @old_r: The encoded maple node to the right (next node).
*/
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
@@ -2242,7 +2241,6 @@ static inline void mast_rebalance_next(struct maple_subtree_state *mast)
/*
* mast_rebalance_prev() - Rebalance against the previous node
* @mast: The maple subtree state
- * @old_l: The encoded maple node to the left (previous node)
*/
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
@@ -2393,9 +2391,9 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
/*
* mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
* pointer.
- * @b_node - the big node to add the entry
- * @mas - the maple state to get the pivot (mas->max)
- * @entry - the entry to add, if NULL nothing happens.
+ * @b_node: the big node to add the entry
+ * @mas: the maple state to get the pivot (mas->max)
+ * @entry: the entry to add, if NULL nothing happens.
*/
static inline void mab_set_b_end(struct maple_big_node *b_node,
struct ma_state *mas,
@@ -2414,11 +2412,11 @@ static inline void mab_set_b_end(struct maple_big_node *b_node,
* mas_set_split_parent() - combine_then_separate helper function. Sets the parent
* of @mas->node to either @left or @right, depending on @slot and @split
*
- * @mas - the maple state with the node that needs a parent
- * @left - possible parent 1
- * @right - possible parent 2
- * @slot - the slot the mas->node was placed
- * @split - the split location between @left and @right
+ * @mas: the maple state with the node that needs a parent
+ * @left: possible parent 1
+ * @right: possible parent 2
+ * @slot: the slot the mas->node was placed
+ * @split: the split location between @left and @right
*/
static inline void mas_set_split_parent(struct ma_state *mas,
struct maple_enode *left,
@@ -2438,11 +2436,11 @@ static inline void mas_set_split_parent(struct ma_state *mas,
/*
* mte_mid_split_check() - Check if the next node passes the mid-split
- * @**l: Pointer to left encoded maple node.
- * @**m: Pointer to middle encoded maple node.
- * @**r: Pointer to right encoded maple node.
+ * @l: Pointer to left encoded maple node.
+ * @m: Pointer to middle encoded maple node.
+ * @r: Pointer to right encoded maple node.
* @slot: The offset
- * @*split: The split location.
+ * @split: The split location.
* @mid_split: The middle split.
*/
static inline void mte_mid_split_check(struct maple_enode **l,
@@ -2466,10 +2464,10 @@ static inline void mte_mid_split_check(struct maple_enode **l,
/*
* mast_set_split_parents() - Helper function to set three nodes parents. Slot
* is taken from @mast->l.
- * @mast - the maple subtree state
- * @left - the left node
- * @right - the right node
- * @split - the split location.
+ * @mast: the maple subtree state
+ * @left: the left node
+ * @right: the right node
+ * @split: the split location.
*/
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
struct maple_enode *left,
@@ -2503,7 +2501,6 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
/*
* mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
- * @enode: The encoded maple node
* @in_rcu: If the tree is in rcu mode
*
* The node will either be RCU freed or pushed back on the maple state.
@@ -2635,7 +2632,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
/*
* mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
- * @old: The old maple encoded node that is being replaced.
+ * @old_enode: The old maple encoded node that is being replaced.
*
* Updates gap as necessary.
*/
@@ -2823,10 +2820,8 @@ dead_node:
* orig_l_mas->last is used in mas_consume to find the slots that will need to
* be either freed or destroyed. orig_l_mas->depth keeps track of the height of
* the new sub-tree in case the sub-tree becomes the full tree.
- *
- * Return: the number of elements in b_node during the last loop.
*/
-static int mas_spanning_rebalance(struct ma_state *mas,
+static void mas_spanning_rebalance(struct ma_state *mas,
struct maple_subtree_state *mast, unsigned char count)
{
unsigned char split, mid_split;
@@ -2942,7 +2937,7 @@ new_root:
mas->offset = l_mas.offset;
mas_wmb_replace(mas, old_enode);
mtree_range_walk(mas);
- return mast->bn->b_end;
+ return;
}
/*
@@ -2952,10 +2947,8 @@ new_root:
*
* Rebalance two nodes into a single node or two new nodes that are sufficient.
* Continue upwards until tree is sufficient.
- *
- * Return: the number of elements in b_node during the last loop.
*/
-static inline int mas_rebalance(struct ma_state *mas,
+static inline void mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
char empty_count = mas_mt_height(mas);
@@ -2976,9 +2969,6 @@ static inline int mas_rebalance(struct ma_state *mas,
* tries to combine the data in the same way. If one node contains the
* entire range of the tree, then that node is used as a new root node.
*/
- mas_node_count(mas, empty_count * 2 - 1);
- if (mas_is_err(mas))
- return 0;
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
@@ -3029,11 +3019,6 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
/* set up node. */
if (in_rcu) {
- /* Allocate for both left and right as well as parent. */
- mas_node_count(mas, 3);
- if (mas_is_err(mas))
- return;
-
newnode = mas_pop_node(mas);
} else {
newnode = &reuse;
@@ -3308,9 +3293,8 @@ static inline bool mas_push_data(struct ma_state *mas, int height,
* mas_split() - Split data that is too big for one node into two.
* @mas: The maple state
* @b_node: The maple big node
- * Return: 1 on success, 0 on failure.
*/
-static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
struct maple_subtree_state mast;
int height = 0;
@@ -3341,10 +3325,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
trace_ma_op(__func__, mas);
mas->depth = mas_mt_height(mas);
- /* Allocation failures will happen early. */
- mas_node_count(mas, 1 + mas->depth * 2);
- if (mas_is_err(mas))
- return 0;
mast.l = &l_mas;
mast.r = &r_mas;
@@ -3392,75 +3372,25 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
mas->node = l_mas.node;
mas_wmb_replace(mas, old);
mtree_range_walk(mas);
- return 1;
-}
-
-/*
- * mas_reuse_node() - Reuse the node to store the data.
- * @wr_mas: The maple write state
- * @bn: The maple big node
- * @end: The end of the data.
- *
- * Will always return false in RCU mode.
- *
- * Return: True if node was reused, false otherwise.
- */
-static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
- struct maple_big_node *bn, unsigned char end)
-{
- /* Need to be rcu safe. */
- if (mt_in_rcu(wr_mas->mas->tree))
- return false;
-
- if (end > bn->b_end) {
- int clear = mt_slots[wr_mas->type] - bn->b_end;
-
- memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
- memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
- }
- mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
- return true;
+ return;
}
/*
* mas_commit_b_node() - Commit the big node into the tree.
* @wr_mas: The maple write state
* @b_node: The maple big node
- * @end: The end of the data.
*/
-static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
- struct maple_big_node *b_node, unsigned char end)
+static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node)
{
- struct maple_node *node;
- struct maple_enode *old_enode;
- unsigned char b_end = b_node->b_end;
- enum maple_type b_type = b_node->type;
-
- old_enode = wr_mas->mas->node;
- if ((b_end < mt_min_slots[b_type]) &&
- (!mte_is_root(old_enode)) &&
- (mas_mt_height(wr_mas->mas) > 1))
- return mas_rebalance(wr_mas->mas, b_node);
+ enum store_type type = wr_mas->mas->store_type;
- if (b_end >= mt_slots[b_type])
- return mas_split(wr_mas->mas, b_node);
+ WARN_ON_ONCE(type != wr_rebalance && type != wr_split_store);
- if (mas_reuse_node(wr_mas, b_node, end))
- goto reuse_node;
-
- mas_node_count(wr_mas->mas, 1);
- if (mas_is_err(wr_mas->mas))
- return 0;
+ if (type == wr_rebalance)
+ return mas_rebalance(wr_mas->mas, b_node);
- node = mas_pop_node(wr_mas->mas);
- node->parent = mas_mn(wr_mas->mas)->parent;
- wr_mas->mas->node = mt_mk_node(node, b_type);
- mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
- mas_replace_node(wr_mas->mas, old_enode);
-reuse_node:
- mas_update_gap(wr_mas->mas);
- wr_mas->mas->end = b_end;
- return 1;
+ return mas_split(wr_mas->mas, b_node);
}
/*
@@ -3477,10 +3407,6 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
unsigned long *pivots;
int slot = 0;
- mas_node_count(mas, 1);
- if (unlikely(mas_is_err(mas)))
- return 0;
-
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
@@ -3526,10 +3452,7 @@ static inline void mas_store_root(struct ma_state *mas, void *entry)
/*
* mas_is_span_wr() - Check if the write needs to be treated as a write that
* spans the node.
- * @mas: The maple state
- * @piv: The pivot value being written
- * @type: The maple node type
- * @entry: The data to write
+ * @wr_mas: The maple write state
*
 * Spanning writes are writes that start in one node and end in another, or
 * writes where storing a %NULL causes the node to end with a %NULL.
@@ -3730,10 +3653,8 @@ static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
* @entry: The entry to store.
*
* Only valid when the index == 0 and the last == ULONG_MAX
- *
- * Return 0 on error, 1 on success.
*/
-static inline int mas_new_root(struct ma_state *mas, void *entry)
+static inline void mas_new_root(struct ma_state *mas, void *entry)
{
struct maple_enode *root = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
@@ -3749,10 +3670,6 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
goto done;
}
- mas_node_count(mas, 1);
- if (mas_is_err(mas))
- return 0;
-
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
@@ -3769,7 +3686,7 @@ done:
if (xa_is_node(root))
mte_destroy_walk(root, mas->tree);
- return 1;
+ return;
}
/*
* mas_wr_spanning_store() - Create a subtree with the store operation completed
@@ -3777,10 +3694,8 @@ done:
* Note that mas is expected to point to the node which caused the store to
* span.
* @wr_mas: The maple write state
- *
- * Return: 0 on error, positive on success.
*/
-static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
struct maple_subtree_state mast;
struct maple_big_node b_node;
@@ -3815,9 +3730,6 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* entries per level plus a new root.
*/
height = mas_mt_height(mas);
- mas_node_count(mas, 1 + height * 3);
- if (mas_is_err(mas))
- return 0;
/*
* Set up right side. Need to get to the next offset after the spanning
@@ -3875,10 +3787,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* @wr_mas: The maple write state
*
* Attempts to reuse the node, but may allocate.
- *
- * Return: True if stored, false otherwise
*/
-static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
+static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
struct ma_state *mas = wr_mas->mas;
@@ -3889,11 +3799,6 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
- /* Check if there is enough data. The room is enough. */
- if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
- !(mas->mas_flags & MA_STATE_BULK))
- return false;
-
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
@@ -3901,10 +3806,6 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
/* set up node. */
if (in_rcu) {
- mas_node_count(mas, 1);
- if (mas_is_err(mas))
- return false;
-
newnode = mas_pop_node(mas);
} else {
memset(&reuse, 0, sizeof(struct maple_node));
@@ -3960,16 +3861,14 @@ done:
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
mas->end = new_end;
- return true;
+ return;
}
/*
* mas_wr_slot_store: Attempt to store a value in a slot.
* @wr_mas: the maple write state
- *
- * Return: True if stored, false otherwise
*/
-static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
+static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char offset = mas->offset;
@@ -4001,7 +3900,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
wr_mas->pivots[offset + 1] = mas->last;
mas->offset++; /* Keep mas accurate. */
} else {
- return false;
+ return;
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
@@ -4012,7 +3911,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
if (!wr_mas->entry || gap)
mas_update_gap(mas);
- return true;
+ return;
}
static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
@@ -4061,9 +3960,6 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
-
- if (!wr_mas->entry)
- mas_wr_extend_null(wr_mas);
}
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
@@ -4089,23 +3985,13 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
* This is currently unsafe in rcu mode since the end of the node may be cached
* by readers while the node contents may be updated which could result in
* inaccurate information.
- *
- * Return: True if appended, false otherwise
*/
-static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+static inline void mas_wr_append(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
- struct ma_state *mas;
+ struct ma_state *mas = wr_mas->mas;
void __rcu **slots;
- unsigned char end;
-
- mas = wr_mas->mas;
- if (mt_in_rcu(mas->tree))
- return false;
-
- end = mas->end;
- if (mas->offset != end)
- return false;
+ unsigned char end = mas->end;
if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
@@ -4139,7 +4025,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
mas->end = new_end;
trace_ma_write(__func__, mas, new_end, wr_mas->entry);
- return true;
+ return;
}
/*
@@ -4155,76 +4041,235 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
+ mas_commit_b_node(wr_mas, &b_node);
}
-static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
+/*
+ * mas_wr_store_entry() - Internal call to store a value
+ * @wr_mas: The maple write state
+ */
+static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
- unsigned char new_end;
+ unsigned char new_end = mas_wr_new_end(wr_mas);
- /* Direct replacement */
- if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
+ switch (mas->store_type) {
+ case wr_invalid:
+ MT_BUG_ON(mas->tree, 1);
+ return;
+ case wr_new_root:
+ mas_new_root(mas, wr_mas->entry);
+ break;
+ case wr_store_root:
+ mas_store_root(mas, wr_mas->entry);
+ break;
+ case wr_exact_fit:
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
if (!!wr_mas->entry ^ !!wr_mas->content)
mas_update_gap(mas);
- return;
+ break;
+ case wr_append:
+ mas_wr_append(wr_mas, new_end);
+ break;
+ case wr_slot_store:
+ mas_wr_slot_store(wr_mas);
+ break;
+ case wr_node_store:
+ mas_wr_node_store(wr_mas, new_end);
+ break;
+ case wr_spanning_store:
+ mas_wr_spanning_store(wr_mas);
+ break;
+ case wr_split_store:
+ case wr_rebalance:
+ mas_wr_bnode(wr_mas);
+ break;
+ }
+
+ return;
+}
+
+static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ if (!mas_is_active(mas)) {
+ if (mas_is_start(mas))
+ goto set_content;
+
+ if (unlikely(mas_is_paused(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_none(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_overflow(mas)))
+ goto reset;
+
+ if (unlikely(mas_is_underflow(mas)))
+ goto reset;
}
/*
- * new_end exceeds the size of the maple node and cannot enter the fast
- * path.
+ * A less strict version of mas_is_span_wr() where we allow spanning
+ * writes within this node. This is to stop partial walks in
+	 * mas_preallocate() from being reset.
*/
- new_end = mas_wr_new_end(wr_mas);
- if (new_end >= mt_slots[wr_mas->type])
- goto slow_path;
-
- /* Attempt to append */
- if (mas_wr_append(wr_mas, new_end))
- return;
+ if (mas->last > mas->max)
+ goto reset;
- if (new_end == mas->end && mas_wr_slot_store(wr_mas))
- return;
+ if (wr_mas->entry)
+ goto set_content;
- if (mas_wr_node_store(wr_mas, new_end))
- return;
+ if (mte_is_leaf(mas->node) && mas->last == mas->max)
+ goto reset;
- if (mas_is_err(mas))
- return;
+ goto set_content;
-slow_path:
- mas_wr_bnode(wr_mas);
+reset:
+ mas_reset(mas);
+set_content:
+ wr_mas->content = mas_start(mas);
}
-/*
- * mas_wr_store_entry() - Internal call to store a value
+/**
+ * mas_prealloc_calc() - Calculate number of nodes needed for a
+ * given store operation
* @mas: The maple state
- * @entry: The entry to store.
+ * @entry: The entry to store into the tree
*
- * Return: The contents that was stored at the index.
+ * Return: Number of nodes required for preallocation.
*/
-static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
+static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
+{
+ int ret = mas_mt_height(mas) * 3 + 1;
+
+ switch (mas->store_type) {
+ case wr_invalid:
+ WARN_ON_ONCE(1);
+ break;
+ case wr_new_root:
+ ret = 1;
+ break;
+ case wr_store_root:
+ if (likely((mas->last != 0) || (mas->index != 0)))
+ ret = 1;
+ else if (((unsigned long) (entry) & 3) == 2)
+ ret = 1;
+ else
+ ret = 0;
+ break;
+ case wr_spanning_store:
+ ret = mas_mt_height(mas) * 3 + 1;
+ break;
+ case wr_split_store:
+ ret = mas_mt_height(mas) * 2 + 1;
+ break;
+ case wr_rebalance:
+ ret = mas_mt_height(mas) * 2 - 1;
+ break;
+ case wr_node_store:
+ ret = mt_in_rcu(mas->tree) ? 1 : 0;
+ break;
+ case wr_append:
+ case wr_exact_fit:
+ case wr_slot_store:
+ ret = 0;
+ }
+
+ return ret;
+}
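As a worked example of this calculation (an illustration, not part of the patch): a spanning store into a tree of height 3 requests 3 * 3 + 1 = 10 nodes, matching the worst case noted at mas_wr_spanning_store() of three new entries per level plus a new root, while an exact fit, an append, or a slot store requests none.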
+
+/*
+ * mas_wr_store_type() - Set the store type for a given
+ * store operation.
+ * @wr_mas: The maple write state
+ */
+static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end;
- wr_mas->content = mas_start(mas);
- if (mas_is_none(mas) || mas_is_ptr(mas)) {
- mas_store_root(mas, wr_mas->entry);
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) {
+ mas->store_type = wr_store_root;
return;
}
if (unlikely(!mas_wr_walk(wr_mas))) {
- mas_wr_spanning_store(wr_mas);
+ mas->store_type = wr_spanning_store;
return;
}
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
- /* New root for a single pointer */
- if (unlikely(!mas->index && mas->last == ULONG_MAX))
- mas_new_root(mas, wr_mas->entry);
- else
- mas_wr_modify(wr_mas);
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+
+ new_end = mas_wr_new_end(wr_mas);
+ if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) {
+ mas->store_type = wr_exact_fit;
+ return;
+ }
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ mas->store_type = wr_new_root;
+ return;
+ }
+
+ /* Potential spanning rebalance collapsing a node */
+ if (new_end < mt_min_slots[wr_mas->type]) {
+ if (!mte_is_root(mas->node)) {
+ mas->store_type = wr_rebalance;
+ return;
+ }
+ mas->store_type = wr_node_store;
+ return;
+ }
+
+ if (new_end >= mt_slots[wr_mas->type]) {
+ mas->store_type = wr_split_store;
+ return;
+ }
+
+ if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) {
+ mas->store_type = wr_append;
+ return;
+ }
+
+ if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
+ (wr_mas->offset_end - mas->offset == 1))) {
+ mas->store_type = wr_slot_store;
+ return;
+ }
+
+ if (mte_is_root(mas->node) || (new_end >= mt_min_slots[wr_mas->type]) ||
+ (mas->mas_flags & MA_STATE_BULK)) {
+ mas->store_type = wr_node_store;
+ return;
+ }
+
+ mas->store_type = wr_invalid;
+ MAS_WARN_ON(mas, 1);
+}
+
+/**
+ * mas_wr_preallocate() - Preallocate enough nodes for a store operation
+ * @wr_mas: The maple write state
+ * @entry: The entry that will be stored
+ *
+ */
+static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
+{
+ struct ma_state *mas = wr_mas->mas;
+ int request;
+
+ mas_wr_prealloc_setup(wr_mas);
+ mas_wr_store_type(wr_mas);
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ return;
+
+ mas_node_count(mas, request);
}
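For context, a minimal sketch of the caller-side pattern these helpers serve, assuming a tree, range, and entry declared elsewhere (illustrative only, not part of the patch):

	MA_STATE(mas, &tree, index, last);

	mtree_lock(&tree);
	if (!mas_preallocate(&mas, entry, GFP_KERNEL)) {
		/* store type and node count were fixed above; this cannot fail */
		mas_store_prealloc(&mas, entry);
	}
	mtree_unlock(&tree);

mas_preallocate() records the chosen store_type in the maple state, so mas_store_prealloc() can replay that decision instead of re-deriving it.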
/**
@@ -4257,26 +4302,24 @@ static inline void *mas_insert(struct ma_state *mas, void *entry)
if (wr_mas.content)
goto exists;
- if (mas_is_none(mas) || mas_is_ptr(mas)) {
- mas_store_root(mas, entry);
+ mas_wr_preallocate(&wr_mas, entry);
+ if (mas_is_err(mas))
return NULL;
- }
/* spanning writes always overwrite something */
- if (!mas_wr_walk(&wr_mas))
+ if (mas->store_type == wr_spanning_store)
goto exists;
/* At this point, we are at the leaf node that needs to be altered. */
- wr_mas.offset_end = mas->offset;
- wr_mas.end_piv = wr_mas.r_max;
+ if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) {
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
- if (wr_mas.content || (mas->last > wr_mas.r_max))
- goto exists;
-
- if (!entry)
- return NULL;
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
+ }
- mas_wr_modify(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
return wr_mas.content;
exists:
@@ -4331,6 +4374,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
if (*next == 0)
mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
+ mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL(mas_alloc_cyclic);
@@ -4440,9 +4484,8 @@ no_entry:
* mas_prev_slot() - Get the entry in the previous slot
*
* @mas: The maple state
- * @max: The minimum starting range
+ * @min: The minimum starting range
* @empty: Can be empty
- * @set_underflow: Set the @mas->node to underflow state on limit.
*
* Return: The entry in the previous slot which is possibly NULL
*/
@@ -4525,6 +4568,7 @@ underflow:
/*
* mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
+ * @node: The maple node
* @max: The maximum pivot value to check.
*
* The next value will be mas->node[mas->offset] or the status will have
@@ -4615,8 +4659,6 @@ overflow:
* @mas: The maple state
* @max: The maximum starting range
* @empty: Can be empty
- * @set_overflow: Should @mas->node be set to overflow when the limit is
- * reached.
*
* Return: The entry in the next slot which is possibly NULL
*/
@@ -5150,9 +5192,9 @@ EXPORT_SYMBOL_GPL(mas_empty_area_rev);
/*
* mte_dead_leaves() - Mark all leaves of a node as dead.
- * @mas: The maple state
+ * @enode: the encoded node
+ * @mt: the maple tree
* @slots: Pointer to the slot array
- * @type: The maple node type
*
* Must hold the write lock.
*
@@ -5358,47 +5400,6 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
mt_destroy_walk(enode, mt, true);
}
}
-
-static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
-{
- if (!mas_is_active(wr_mas->mas)) {
- if (mas_is_start(wr_mas->mas))
- return;
-
- if (unlikely(mas_is_paused(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_none(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_overflow(wr_mas->mas)))
- goto reset;
-
- if (unlikely(mas_is_underflow(wr_mas->mas)))
- goto reset;
- }
-
- /*
- * A less strict version of mas_is_span_wr() where we allow spanning
- * writes within this node. This is to stop partial walks in
- * mas_prealloc() from being reset.
- */
- if (wr_mas->mas->last > wr_mas->mas->max)
- goto reset;
-
- if (wr_mas->entry)
- return;
-
- if (mte_is_leaf(wr_mas->mas->node) &&
- wr_mas->mas->last == wr_mas->mas->max)
- goto reset;
-
- return;
-
-reset:
- mas_reset(wr_mas->mas);
-}
-
/* Interface */
/**
@@ -5407,13 +5408,12 @@ reset:
* @entry: The entry to store.
*
* The @mas->index and @mas->last is used to set the range for the @entry.
- * Note: The @mas should have pre-allocated entries to ensure there is memory to
- * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
*
* Return: the first entry between mas->index and mas->last or %NULL.
*/
void *mas_store(struct ma_state *mas, void *entry)
{
+ int request;
MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(__func__, mas, 0, entry);
@@ -5434,8 +5434,25 @@ void *mas_store(struct ma_state *mas, void *entry)
* want to examine what happens if a single store operation was to
* overwrite multiple entries within a self-balancing B-Tree.
*/
- mas_wr_store_setup(&wr_mas);
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
+ if (mas->mas_flags & MA_STATE_PREALLOC) {
+ mas_wr_store_entry(&wr_mas);
+ MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
+ return wr_mas.content;
+ }
+
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ goto store;
+
+ mas_node_count(mas, request);
+ if (mas_is_err(mas))
+ return NULL;
+
+store:
mas_wr_store_entry(&wr_mas);
+ mas_destroy(mas);
return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
@@ -5451,19 +5468,28 @@ EXPORT_SYMBOL_GPL(mas_store);
*/
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
+ unsigned long index = mas->index;
+ unsigned long last = mas->last;
MA_WR_STATE(wr_mas, mas, entry);
+ int ret = 0;
- mas_wr_store_setup(&wr_mas);
- trace_ma_write(__func__, mas, 0, entry);
retry:
- mas_wr_store_entry(&wr_mas);
- if (unlikely(mas_nomem(mas, gfp)))
+ mas_wr_preallocate(&wr_mas, entry);
+ if (unlikely(mas_nomem(mas, gfp))) {
+ if (!entry)
+ __mas_set_range(mas, index, last);
goto retry;
+ }
- if (unlikely(mas_is_err(mas)))
- return xa_err(mas->node);
+ if (mas_is_err(mas)) {
+ ret = xa_err(mas->node);
+ goto out;
+ }
- return 0;
+ mas_wr_store_entry(&wr_mas);
+out:
+ mas_destroy(mas);
+ return ret;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
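Note that the retry loop above re-runs mas_wr_preallocate() rather than the store itself, and mtree_store_range() further down is reworked to delegate here, so the nomem retry and the final mas_destroy() live in one place.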
@@ -5477,7 +5503,19 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
- mas_wr_store_setup(&wr_mas);
+ if (mas->store_type == wr_store_root) {
+ mas_wr_prealloc_setup(&wr_mas);
+ goto store;
+ }
+
+ mas_wr_walk_descend(&wr_mas);
+ if (mas->store_type != wr_spanning_store) {
+ /* set wr_mas->content to current slot */
+ wr_mas.content = mas_slot_locked(mas, wr_mas.slots, mas->offset);
+ mas_wr_end_piv(&wr_mas);
+ }
+
+store:
trace_ma_write(__func__, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
@@ -5496,70 +5534,25 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
- unsigned char node_size;
- int request = 1;
- int ret;
-
-
- if (unlikely(!mas->index && mas->last == ULONG_MAX))
- goto ask_now;
-
- mas_wr_store_setup(&wr_mas);
- wr_mas.content = mas_start(mas);
- /* Root expand */
- if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
- goto ask_now;
-
- if (unlikely(!mas_wr_walk(&wr_mas))) {
- /* Spanning store, use worst case for now */
- request = 1 + mas_mt_height(mas) * 3;
- goto ask_now;
- }
-
- /* At this point, we are at the leaf node that needs to be altered. */
- /* Exact fit, no nodes needed. */
- if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
- return 0;
-
- mas_wr_end_piv(&wr_mas);
- node_size = mas_wr_new_end(&wr_mas);
+ int ret = 0;
+ int request;
- /* Slot store, does not require additional nodes */
- if (node_size == mas->end) {
- /* reuse node */
- if (!mt_in_rcu(mas->tree))
- return 0;
- /* shifting boundary */
- if (wr_mas.offset_end - mas->offset == 1)
- return 0;
- }
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
+ request = mas_prealloc_calc(mas, entry);
+ if (!request)
+ return ret;
- if (node_size >= mt_slots[wr_mas.type]) {
- /* Split, worst case for now. */
- request = 1 + mas_mt_height(mas) * 2;
- goto ask_now;
+ mas_node_count_gfp(mas, request, gfp);
+ if (mas_is_err(mas)) {
+ mas_set_alloc_req(mas, 0);
+ ret = xa_err(mas->node);
+ mas_destroy(mas);
+ mas_reset(mas);
+ return ret;
}
- /* New root needs a single node */
- if (unlikely(mte_is_root(mas->node)))
- goto ask_now;
-
- /* Potential spanning rebalance collapsing a node, use worst-case */
- if (node_size - 1 <= mt_min_slots[wr_mas.type])
- request = mas_mt_height(mas) * 2 - 1;
-
- /* node store, slot store needs one node */
-ask_now:
- mas_node_count_gfp(mas, request, gfp);
mas->mas_flags |= MA_STATE_PREALLOC;
- if (likely(!mas_is_err(mas)))
- return 0;
-
- mas_set_alloc_req(mas, 0);
- ret = xa_err(mas->node);
- mas_reset(mas);
- mas_destroy(mas);
- mas_reset(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
@@ -5585,7 +5578,8 @@ void mas_destroy(struct ma_state *mas)
*/
if (mas->mas_flags & MA_STATE_REBALANCE) {
unsigned char end;
-
+ if (mas_is_err(mas))
+ mas_reset(mas);
mas_start(mas);
mtree_range_walk(mas);
end = mas->end + 1;
@@ -6245,24 +6239,32 @@ EXPORT_SYMBOL_GPL(mas_find_range_rev);
void *mas_erase(struct ma_state *mas)
{
void *entry;
+ unsigned long index = mas->index;
MA_WR_STATE(wr_mas, mas, NULL);
if (!mas_is_active(mas) || !mas_is_start(mas))
mas->status = ma_start;
- /* Retry unnecessary when holding the write lock. */
+write_retry:
entry = mas_state_walk(mas);
if (!entry)
return NULL;
-write_retry:
/* Must reset to ensure spanning writes of last slot are detected */
mas_reset(mas);
- mas_wr_store_setup(&wr_mas);
- mas_wr_store_entry(&wr_mas);
- if (mas_nomem(mas, GFP_KERNEL))
+ mas_wr_preallocate(&wr_mas, NULL);
+ if (mas_nomem(mas, GFP_KERNEL)) {
+ /* in case the range of entry changed when unlocked */
+ mas->index = mas->last = index;
goto write_retry;
+ }
+ if (mas_is_err(mas))
+ goto out;
+
+ mas_wr_store_entry(&wr_mas);
+out:
+ mas_destroy(mas);
return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
@@ -6277,10 +6279,8 @@ EXPORT_SYMBOL_GPL(mas_erase);
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
__must_hold(mas->tree->ma_lock)
{
- if (likely(mas->node != MA_ERROR(-ENOMEM))) {
- mas_destroy(mas);
+ if (likely(mas->node != MA_ERROR(-ENOMEM)))
return false;
- }
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
@@ -6357,7 +6357,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(mas, mt, index, last);
- MA_WR_STATE(wr_mas, &mas, entry);
+ int ret = 0;
trace_ma_write(__func__, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
@@ -6367,16 +6367,10 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
return -EINVAL;
mtree_lock(mt);
-retry:
- mas_wr_store_entry(&wr_mas);
- if (mas_nomem(&mas, gfp))
- goto retry;
-
+ ret = mas_store_gfp(&mas, entry, gfp);
mtree_unlock(mt);
- if (mas_is_err(&mas))
- return xa_err(mas.node);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(mtree_store_range);
@@ -6412,6 +6406,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(ms, mt, first, last);
+ int ret = 0;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
@@ -6427,9 +6422,10 @@ retry:
mtree_unlock(mt);
if (mas_is_err(&ms))
- return xa_err(ms.node);
+ ret = xa_err(ms.node);
- return 0;
+ mas_destroy(&ms);
+ return ret;
}
EXPORT_SYMBOL(mtree_insert_range);
@@ -6484,6 +6480,7 @@ retry:
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
@@ -6565,6 +6562,7 @@ retry:
unlock:
mtree_unlock(mt);
+ mas_destroy(&mas);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
@@ -6997,6 +6995,19 @@ void mt_set_non_kernel(unsigned int val)
kmem_cache_set_non_kernel(maple_node_cache, val);
}
+extern void kmem_cache_set_callback(struct kmem_cache *cachep,
+ void (*callback)(void *));
+void mt_set_callback(void (*callback)(void *))
+{
+ kmem_cache_set_callback(maple_node_cache, callback);
+}
+
+extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private);
+void mt_set_private(void *private)
+{
+ kmem_cache_set_private(maple_node_cache, private);
+}
+
extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
@@ -7181,7 +7192,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
enum mt_dump_format format)
{
struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
- bool leaf = mte_is_leaf(entry);
unsigned long first = min;
int i;
@@ -7215,19 +7225,22 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
break;
if (last == 0 && i > 0)
break;
- if (leaf)
- mt_dump_entry(mt_slot(mt, node->slot, i),
- first, last, depth + 1, format);
- else if (node->slot[i])
+ if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ switch (format) {
+ case mt_dump_hex:
+ pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
- break;
+ break;
+ case mt_dump_dec:
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ }
}
first = last + 1;
}
@@ -7627,6 +7640,40 @@ void mas_dump(const struct ma_state *mas)
break;
}
+ pr_err("Store Type: ");
+ switch (mas->store_type) {
+ case wr_invalid:
+ pr_err("invalid store type\n");
+ break;
+ case wr_new_root:
+ pr_err("new_root\n");
+ break;
+ case wr_store_root:
+ pr_err("store_root\n");
+ break;
+ case wr_exact_fit:
+ pr_err("exact_fit\n");
+ break;
+ case wr_split_store:
+ pr_err("split_store\n");
+ break;
+ case wr_slot_store:
+ pr_err("slot_store\n");
+ break;
+ case wr_append:
+ pr_err("append\n");
+ break;
+ case wr_node_store:
+ pr_err("node_store\n");
+ break;
+ case wr_spanning_store:
+ pr_err("spanning_store\n");
+ break;
+ case wr_rebalance:
+ pr_err("rebalance\n");
+ break;
+ }
+
pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
mas->index, mas->last);
pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 3c1f92a7459d..3ef11305f8d2 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 191761b1b623..5faa29208bdb 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -186,55 +186,84 @@ EXPORT_SYMBOL(iter_div_u64_rem);
#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
- u64 res = 0, div, rem;
- int shift;
+ if (ilog2(a) + ilog2(b) <= 62)
+ return div64_u64(a * b, c);
- /* can a * b overflow ? */
- if (ilog2(a) + ilog2(b) > 62) {
- /*
- * Note that the algorithm after the if block below might lose
- * some precision and the result is more exact for b > a. So
- * exchange a and b if a is bigger than b.
- *
- * For example with a = 43980465100800, b = 100000000, c = 1000000000
- * the below calculation doesn't modify b at all because div == 0
- * and then shift becomes 45 + 26 - 62 = 9 and so the result
- * becomes 4398035251080. However with a and b swapped the exact
- * result is calculated (i.e. 4398046510080).
- */
- if (a > b)
- swap(a, b);
+#if defined(__SIZEOF_INT128__)
+
+ /* native 64x64=128 bits multiplication */
+ u128 prod = (u128)a * b;
+ u64 n_lo = prod, n_hi = prod >> 64;
+
+#else
+
+ /* perform a 64x64=128 bits multiplication manually */
+ u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
+ u64 x, y, z;
+
+ x = (u64)a_lo * b_lo;
+ y = (u64)a_lo * b_hi + (u32)(x >> 32);
+ z = (u64)a_hi * b_hi + (u32)(y >> 32);
+ y = (u64)a_hi * b_lo + (u32)y;
+ z += (u32)(y >> 32);
+ x = (y << 32) + (u32)x;
+
+ u64 n_lo = x, n_hi = z;
+
+#endif
+
+ /* make sure c is not zero, trigger exception otherwise */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdiv-by-zero"
+ if (unlikely(c == 0))
+ return 1/0;
+#pragma GCC diagnostic pop
+
+ int shift = __builtin_ctzll(c);
+ /* try reducing the fraction in case the dividend becomes <= 64 bits */
+ if ((n_hi >> shift) == 0) {
+ u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;
+
+ return div64_u64(n, c >> shift);
/*
- * (b * a) / c is equal to
- *
- * (b / c) * a +
- * (b % c) * a / c
- *
- * if nothing overflows. Can the 1st multiplication
- * overflow? Yes, but we do not care: this can only
- * happen if the end result can't fit in u64 anyway.
- *
- * So the code below does
- *
- * res = (b / c) * a;
- * b = b % c;
+ * The remainder value if needed would be:
+ * res = div64_u64_rem(n, c >> shift, &rem);
+ * rem = (rem << shift) + (n_lo - (n << shift));
*/
- div = div64_u64_rem(b, c, &rem);
- res = div * a;
- b = rem;
-
- shift = ilog2(a) + ilog2(b) - 62;
- if (shift > 0) {
- /* drop precision */
- b >>= shift;
- c >>= shift;
- if (!c)
- return res;
- }
}
- return res + div64_u64(a * b, c);
+ if (n_hi >= c) {
+ /* overflow: result is unrepresentable in a u64 */
+ return -1;
+ }
+
+ /* Do the full 128 by 64 bits division */
+
+ shift = __builtin_clzll(c);
+ c <<= shift;
+
+ int p = 64 + shift;
+ u64 res = 0;
+ bool carry;
+
+ do {
+ carry = n_hi >> 63;
+ shift = carry ? 1 : __builtin_clzll(n_hi);
+ if (p < shift)
+ break;
+ p -= shift;
+ n_hi <<= shift;
+ n_hi |= n_lo >> (64 - shift);
+ n_lo <<= shift;
+ if (carry || (n_hi >= c)) {
+ n_hi -= c;
+ res |= 1ULL << p;
+ }
+ } while (n_hi);
+ /* The remainder value if needed would be n_hi << p */
+
+ return res;
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
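A worked trace of the reduction path above (derived from the code, not part of the patch): for a = b = 2^32 and c = 2, ilog2(a) + ilog2(b) = 64 exceeds 62, so the 128-bit product is formed with n_hi = 1 and n_lo = 0. Since __builtin_ctzll(c) = 1 and n_hi >> 1 == 0, the fraction reduces to n = (n_lo >> 1) | (n_hi << 63) = 2^63 over c >> 1 = 1, and div64_u64() returns 2^63, which is exactly 2^64 / 2. Had n_hi been >= c, the quotient could not fit in a u64 and the function would return -1 (all bits set) instead.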
diff --git a/lib/math/test_mul_u64_u64_div_u64.c b/lib/math/test_mul_u64_u64_div_u64.c
new file mode 100644
index 000000000000..58d058de4e73
--- /dev/null
+++ b/lib/math/test_mul_u64_u64_div_u64.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 BayLibre SAS
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/math64.h>
+
+typedef struct { u64 a; u64 b; u64 c; u64 result; } test_params;
+
+static test_params test_values[] = {
+/* this contains many edge values followed by a couple random values */
+{ 0xb, 0x7, 0x3, 0x19 },
+{ 0xffff0000, 0xffff0000, 0xf, 0x1110eeef00000000 },
+{ 0xffffffff, 0xffffffff, 0x1, 0xfffffffe00000001 },
+{ 0xffffffff, 0xffffffff, 0x2, 0x7fffffff00000000 },
+{ 0x1ffffffff, 0xffffffff, 0x2, 0xfffffffe80000000 },
+{ 0x1ffffffff, 0xffffffff, 0x3, 0xaaaaaaa9aaaaaaab },
+{ 0x1ffffffff, 0x1ffffffff, 0x4, 0xffffffff00000000 },
+{ 0xffff000000000000, 0xffff000000000000, 0xffff000000000001, 0xfffeffffffffffff },
+{ 0x3333333333333333, 0x3333333333333333, 0x5555555555555555, 0x1eb851eb851eb851 },
+{ 0x7fffffffffffffff, 0x2, 0x3, 0x5555555555555554 },
+{ 0xffffffffffffffff, 0x2, 0x8000000000000000, 0x3 },
+{ 0xffffffffffffffff, 0x2, 0xc000000000000000, 0x2 },
+{ 0xffffffffffffffff, 0x4000000000000004, 0x8000000000000000, 0x8000000000000007 },
+{ 0xffffffffffffffff, 0x4000000000000001, 0x8000000000000000, 0x8000000000000001 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000001 },
+{ 0xfffffffffffffffe, 0x8000000000000001, 0xffffffffffffffff, 0x8000000000000000 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffe, 0x8000000000000001 },
+{ 0xffffffffffffffff, 0x8000000000000001, 0xfffffffffffffffd, 0x8000000000000002 },
+{ 0x7fffffffffffffff, 0xffffffffffffffff, 0xc000000000000000, 0xaaaaaaaaaaaaaaa8 },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0xa000000000000000, 0xccccccccccccccca },
+{ 0xffffffffffffffff, 0x7fffffffffffffff, 0x9000000000000000, 0xe38e38e38e38e38b },
+{ 0x7fffffffffffffff, 0x7fffffffffffffff, 0x5000000000000000, 0xccccccccccccccc9 },
+{ 0xffffffffffffffff, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfffffffffffffffe },
+{ 0xe6102d256d7ea3ae, 0x70a77d0be4c31201, 0xd63ec35ab3220357, 0x78f8bf8cc86c6e18 },
+{ 0xf53bae05cb86c6e1, 0x3847b32d2f8d32e0, 0xcfd4f55a647f403c, 0x42687f79d8998d35 },
+{ 0x9951c5498f941092, 0x1f8c8bfdf287a251, 0xa3c8dc5f81ea3fe2, 0x1d887cb25900091f },
+{ 0x374fee9daa1bb2bb, 0x0d0bfbff7b8ae3ef, 0xc169337bd42d5179, 0x03bb2dbaffcbb961 },
+{ 0xeac0d03ac10eeaf0, 0x89be05dfa162ed9b, 0x92bb1679a41f0e4b, 0xdc5f5cc9e270d216 },
+};
+
+/*
+ * The above table can be verified with the following shell script:
+ *
+ * #!/bin/sh
+ * sed -ne 's/^{ \+\(.*\), \+\(.*\), \+\(.*\), \+\(.*\) },$/\1 \2 \3 \4/p' \
+ * lib/math/test_mul_u64_u64_div_u64.c |
+ * while read a b c r; do
+ * expected=$( printf "obase=16; ibase=16; %X * %X / %X\n" $a $b $c | bc )
+ * given=$( printf "%X\n" $r )
+ * if [ "$expected" = "$given" ]; then
+ * echo "$a * $b / $c = $r OK"
+ * else
+ * echo "$a * $b / $c = $r is wrong" >&2
+ * echo "should be equivalent to 0x$expected" >&2
+ * exit 1
+ * fi
+ * done
+ */
+
+static int __init test_init(void)
+{
+ int i;
+
+ pr_info("Starting mul_u64_u64_div_u64() test\n");
+
+ for (i = 0; i < ARRAY_SIZE(test_values); i++) {
+ u64 a = test_values[i].a;
+ u64 b = test_values[i].b;
+ u64 c = test_values[i].c;
+ u64 expected_result = test_values[i].result;
+ u64 result = mul_u64_u64_div_u64(a, b, c);
+
+ if (result != expected_result) {
+ pr_err("ERROR: 0x%016llx * 0x%016llx / 0x%016llx\n", a, b, c);
+ pr_err("ERROR: expected result: %016llx\n", expected_result);
+ pr_err("ERROR: obtained result: %016llx\n", result);
+ }
+ }
+
+ pr_info("Completed mul_u64_u64_div_u64() test\n");
+ return 0;
+}
+
+static void __exit test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("mul_u64_u64_div_u64() test module");
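A hedged usage note (not from the patch): build the module with the matching Kconfig option enabled (CONFIG_TEST_MULDIV64=m, per the Makefile hunk above), load it, and check the kernel log; any row that disagrees with mul_u64_u64_div_u64() is reported through the pr_err() calls, and a clean run prints only the start and completion messages.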
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 51bc5246986d..2891f94a11c6 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -209,7 +209,7 @@ int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
INIT_LIST_HEAD(&fbc[i].list);
#endif
fbc[i].count = amount;
- fbc[i].counters = (void *)counters + (i * counter_size);
+ fbc[i].counters = (void __percpu *)counters + i * counter_size;
debug_percpu_counter_activate(&fbc[i]);
}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index dbbed19f8fff..6c902639728b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -189,7 +189,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
size = nbuckets;
- if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
+ if (tbl == NULL && !gfpflags_allow_blocking(gfp)) {
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
nbuckets = 0;
}
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5e2e93307f0d..d3412984170c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -65,7 +65,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
{
unsigned long mask, word_mask;
- guard(spinlock_irqsave)(&map->swap_lock);
+ guard(raw_spinlock_irqsave)(&map->swap_lock);
if (!map->cleared) {
if (depth == 0)
@@ -136,7 +136,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
}
for (i = 0; i < sb->map_nr; i++)
- spin_lock_init(&sb->map[i].swap_lock);
+ raw_spin_lock_init(&sb->map[i].swap_lock);
return 0;
}
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 6432b8c3e431..989a12a67872 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -120,6 +120,15 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ src = masked_user_access_begin(src);
+ retval = do_strncpy_from_user(dst, src, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index feeb935a2299..6e489f9e90f1 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -96,6 +96,15 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;
+ if (can_do_masked_user_access()) {
+ long retval;
+
+ str = masked_user_access_begin(str);
+ retval = do_strnlen_user(str, count, count);
+ user_read_access_end();
+ return retval;
+ }
+
max_addr = TASK_SIZE_MAX;
src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
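Both this hunk and the strncpy_from_user one above follow the same shape: when can_do_masked_user_access() is true, masked_user_access_begin() turns the user pointer into one that is either valid or guaranteed to fault, so the explicit max_addr/untagged_addr range check below is skipped on the fast path. (This reading is inferred from the masked-user-access helper names; the hunks themselves only show the call sequence.)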
diff --git a/lib/test_bits.c b/lib/test_bits.c
index 01313980f175..c7b38d91e1f1 100644
--- a/lib/test_bits.c
+++ b/lib/test_bits.c
@@ -39,6 +39,36 @@ static void genmask_ull_test(struct kunit *test)
#endif
}
+static void genmask_u128_test(struct kunit *test)
+{
+#ifdef CONFIG_ARCH_SUPPORTS_INT128
+ /* Below 64 bit masks */
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(0, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000006ull, GENMASK_U128(2, 1));
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(31, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_U128(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(63, 0));
+
+ /* Above 64 bit masks - only a 64 bit portion can be validated at once */
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(64, 0) >> 1);
+ KUNIT_EXPECT_EQ(test, 0x00000000ffffffffull, GENMASK_U128(81, 50) >> 50);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ffffffull, GENMASK_U128(87, 64) >> 64);
+ KUNIT_EXPECT_EQ(test, 0x0000000000ff0000ull, GENMASK_U128(87, 80) >> 64);
+
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_U128(127, 0) >> 64);
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, (u64)GENMASK_U128(127, 0));
+ KUNIT_EXPECT_EQ(test, 0x0000000000000003ull, GENMASK_U128(127, 126) >> 126);
+ KUNIT_EXPECT_EQ(test, 0x0000000000000001ull, GENMASK_U128(127, 127) >> 127);
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_U128(0, 1);
+ GENMASK_U128(0, 10);
+ GENMASK_U128(9, 10);
+#endif /* TEST_GENMASK_FAILURES */
+#endif /* CONFIG_ARCH_SUPPORTS_INT128 */
+}
+
static void genmask_input_check_test(struct kunit *test)
{
unsigned int x, y;
@@ -56,12 +86,16 @@ static void genmask_input_check_test(struct kunit *test)
/* Valid input */
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(100, 80));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(110, 65));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(127, 0));
}
static struct kunit_case bits_test_cases[] = {
KUNIT_CASE(genmask_test),
KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_u128_test),
KUNIT_CASE(genmask_input_check_test),
{}
};
diff --git a/lib/test_fpu_glue.c b/lib/test_fpu_glue.c
index 074f30301f29..c0596426370a 100644
--- a/lib/test_fpu_glue.c
+++ b/lib/test_fpu_glue.c
@@ -42,7 +42,7 @@ static int __init test_fpu_init(void)
return -EINVAL;
selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
- if (!selftest_dir)
+ if (IS_ERR(selftest_dir))
return -ENOMEM;
debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index ee20e1f9bae9..056f2e411d7b 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -799,10 +799,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
unsigned long mapped = 0;
int i;
- if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
- next = end;
- else
- next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+ next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT));
ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
/*
diff --git a/lib/test_objpool.c b/lib/test_objpool.c
index bfdb81599832..5a3f6961a70f 100644
--- a/lib/test_objpool.c
+++ b/lib/test_objpool.c
@@ -687,4 +687,5 @@ static void __exit ot_mod_exit(void)
module_init(ot_mod_init);
module_exit(ot_mod_exit);
-MODULE_LICENSE("GPL");
\ No newline at end of file
+MODULE_DESCRIPTION("Test module for lockless object pool");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 965cb6f28527..8448b6d02bd9 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -641,26 +641,12 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
test(cmp_buf, "%pGp", &flags);
}
-static void __init page_type_test(unsigned int page_type, const char *name,
- char *cmp_buf)
-{
- unsigned long size;
-
- size = scnprintf(cmp_buf, BUF_SIZE, "%#x(", page_type);
- if (page_type_has_type(page_type))
- size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
-
- snprintf(cmp_buf + size, BUF_SIZE - size, ")");
- test(cmp_buf, "%pGt", &page_type);
-}
-
static void __init
flags(void)
{
unsigned long flags;
char *cmp_buffer;
gfp_t gfp;
- unsigned int page_type;
cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!cmp_buffer)
@@ -700,18 +686,6 @@ flags(void)
gfp |= __GFP_HIGH;
test(cmp_buffer, "%pGg", &gfp);
- page_type = ~0;
- page_type_test(page_type, "", cmp_buffer);
-
- page_type = 10;
- page_type_test(page_type, "", cmp_buffer);
-
- page_type = ~PG_buddy;
- page_type_test(page_type, "buddy", cmp_buffer);
-
- page_type = ~(PG_table | PG_buddy);
- page_type_test(page_type, "table|buddy", cmp_buffer);
-
kfree(cmp_buffer);
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2d71b1115916..09f022ba1c05 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2054,25 +2054,6 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
return buf;
}
-static
-char *format_page_type(char *buf, char *end, unsigned int page_type)
-{
- buf = number(buf, end, page_type, default_flag_spec);
-
- if (buf < end)
- *buf = '(';
- buf++;
-
- if (page_type_has_type(page_type))
- buf = format_flags(buf, end, ~page_type, pagetype_names);
-
- if (buf < end)
- *buf = ')';
- buf++;
-
- return buf;
-}
-
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
@@ -2086,8 +2067,6 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
switch (fmt[1]) {
case 'p':
return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
- case 't':
- return format_page_type(buf, end, *(unsigned int *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index aef086a6bf2f..20aa459bfb3e 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -5,7 +5,8 @@ config XZ_DEC
help
LZMA2 compression algorithm and BCJ filters are supported using
the .xz file format as the container. For integrity checking,
- CRC32 is supported. See Documentation/staging/xz.rst for more information.
+ CRC32 is supported. See Documentation/staging/xz.rst for more
+ information.
if XZ_DEC
@@ -29,11 +30,21 @@ config XZ_DEC_ARMTHUMB
default y
select XZ_DEC_BCJ
+config XZ_DEC_ARM64
+ bool "ARM64 BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
config XZ_DEC_SPARC
bool "SPARC BCJ filter decoder" if EXPERT
default y
select XZ_DEC_BCJ
+config XZ_DEC_RISCV
+ bool "RISC-V BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
config XZ_DEC_MICROLZMA
bool "MicroLZMA decoder"
default n
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 88a2c35e1b59..6a7906a328ba 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
/*
@@ -27,9 +26,9 @@
STATIC_RW_DATA uint32_t xz_crc32_table[256];
-XZ_EXTERN void xz_crc32_init(void)
+void xz_crc32_init(void)
{
- const uint32_t poly = CRC32_POLY_LE;
+ const uint32_t poly = 0xEDB88320;
uint32_t i;
uint32_t j;
@@ -46,7 +45,7 @@ XZ_EXTERN void xz_crc32_init(void)
return;
}
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
crc = ~crc;
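A minimal usage sketch for the now unprefixed helpers, assuming a buf/size pair in scope (illustrative only, not part of the patch):

	uint32_t crc;

	xz_crc32_init();		/* fill xz_crc32_table once at startup */
	crc = xz_crc32(buf, size, 0);	/* seed with 0; chain by passing the previous return value */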
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index ef449e97d1a1..8237db17eee3 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* Branch/Call/Jump (BCJ) filter decoders
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -24,7 +23,9 @@ struct xz_dec_bcj {
BCJ_IA64 = 6, /* Big or little endian */
BCJ_ARM = 7, /* Little endian only */
BCJ_ARMTHUMB = 8, /* Little endian only */
- BCJ_SPARC = 9 /* Big or little endian */
+ BCJ_SPARC = 9, /* Big or little endian */
+ BCJ_ARM64 = 10, /* AArch64 */
+ BCJ_RISCV = 11 /* RV32GQC_Zfh, RV64GQC_Zfh */
} type;
/*
@@ -162,7 +163,9 @@ static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t instr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr & 0xFC000003) == 0x48000001) {
instr &= 0x03FFFFFC;
@@ -219,7 +222,9 @@ static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
/* Instruction normalized with bit_res for easier manipulation */
uint64_t norm;
- for (i = 0; i + 16 <= size; i += 16) {
+ size &= ~(size_t)15;
+
+ for (i = 0; i < size; i += 16) {
mask = branch_table[buf[i] & 0x1F];
for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
if (((mask >> slot) & 1) == 0)
@@ -267,7 +272,9 @@ static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t addr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
if (buf[i + 3] == 0xEB) {
addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
| ((uint32_t)buf[i + 2] << 16);
@@ -290,7 +297,12 @@ static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t addr;
- for (i = 0; i + 4 <= size; i += 2) {
+ if (size < 4)
+ return 0;
+
+ size -= 4;
+
+ for (i = 0; i <= size; i += 2) {
if ((buf[i + 1] & 0xF8) == 0xF0
&& (buf[i + 3] & 0xF8) == 0xF8) {
addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
@@ -318,7 +330,9 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
size_t i;
uint32_t instr;
- for (i = 0; i + 4 <= size; i += 4) {
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
instr <<= 2;
@@ -334,6 +348,140 @@ static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
}
#endif
+#ifdef XZ_DEC_ARM64
+static size_t bcj_arm64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t instr;
+ uint32_t addr;
+
+ size &= ~(size_t)3;
+
+ for (i = 0; i < size; i += 4) {
+ instr = get_unaligned_le32(buf + i);
+
+ if ((instr >> 26) == 0x25) {
+ /* BL instruction */
+ addr = instr - ((s->pos + (uint32_t)i) >> 2);
+ instr = 0x94000000 | (addr & 0x03FFFFFF);
+ put_unaligned_le32(instr, buf + i);
+
+ } else if ((instr & 0x9F000000) == 0x90000000) {
+ /* ADRP instruction */
+ addr = ((instr >> 29) & 3) | ((instr >> 3) & 0x1FFFFC);
+
+ /* Only convert values in the range +/-512 MiB. */
+ if ((addr + 0x020000) & 0x1C0000)
+ continue;
+
+ addr -= (s->pos + (uint32_t)i) >> 12;
+
+ instr &= 0x9000001F;
+ instr |= (addr & 3) << 29;
+ instr |= (addr & 0x03FFFC) << 3;
+ instr |= (0U - (addr & 0x020000)) & 0xE00000;
+
+ put_unaligned_le32(instr, buf + i);
+ }
+ }
+
+ return i;
+}
+#endif
+
+#ifdef XZ_DEC_RISCV
+static size_t bcj_riscv(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t b1;
+ uint32_t b2;
+ uint32_t b3;
+ uint32_t instr;
+ uint32_t instr2;
+ uint32_t instr2_rs1;
+ uint32_t addr;
+
+ if (size < 8)
+ return 0;
+
+ size -= 8;
+
+ for (i = 0; i <= size; i += 2) {
+ instr = buf[i];
+
+ if (instr == 0xEF) {
+ /* JAL */
+ b1 = buf[i + 1];
+ if ((b1 & 0x0D) != 0)
+ continue;
+
+ b2 = buf[i + 2];
+ b3 = buf[i + 3];
+
+ addr = ((b1 & 0xF0) << 13) | (b2 << 9) | (b3 << 1);
+ addr -= s->pos + (uint32_t)i;
+
+ buf[i + 1] = (uint8_t)((b1 & 0x0F)
+ | ((addr >> 8) & 0xF0));
+
+ buf[i + 2] = (uint8_t)(((addr >> 16) & 0x0F)
+ | ((addr >> 7) & 0x10)
+ | ((addr << 4) & 0xE0));
+
+ buf[i + 3] = (uint8_t)(((addr >> 4) & 0x7F)
+ | ((addr >> 13) & 0x80));
+
+ i += 4 - 2;
+
+ } else if ((instr & 0x7F) == 0x17) {
+ /* AUIPC */
+ instr |= (uint32_t)buf[i + 1] << 8;
+ instr |= (uint32_t)buf[i + 2] << 16;
+ instr |= (uint32_t)buf[i + 3] << 24;
+
+ if (instr & 0xE80) {
+ /* AUIPC's rd doesn't equal x0 or x2. */
+ instr2 = get_unaligned_le32(buf + i + 4);
+
+ if (((instr << 8) ^ (instr2 - 3)) & 0xF8003) {
+ i += 6 - 2;
+ continue;
+ }
+
+ addr = (instr & 0xFFFFF000) + (instr2 >> 20);
+
+ instr = 0x17 | (2 << 7) | (instr2 << 12);
+ instr2 = addr;
+ } else {
+ /* AUIPC's rd equals x0 or x2. */
+ instr2_rs1 = instr >> 27;
+
+ if ((uint32_t)((instr - 0x3117) << 18)
+ >= (instr2_rs1 & 0x1D)) {
+ i += 4 - 2;
+ continue;
+ }
+
+ addr = get_unaligned_be32(buf + i + 4);
+ addr -= s->pos + (uint32_t)i;
+
+ instr2 = (instr >> 12) | (addr << 20);
+
+ instr = 0x17 | (instr2_rs1 << 7)
+ | ((addr + 0x800) & 0xFFFFF000);
+ }
+
+ put_unaligned_le32(instr, buf + i);
+ put_unaligned_le32(instr2, buf + i + 4);
+
+ i += 8 - 2;
+ }
+ }
+
+ return i;
+}
+#endif
+
/*
* Apply the selected BCJ filter. Update *pos and s->pos to match the amount
* of data that got filtered.
@@ -381,6 +529,16 @@ static void bcj_apply(struct xz_dec_bcj *s,
filtered = bcj_sparc(s, buf, size);
break;
#endif
+#ifdef XZ_DEC_ARM64
+ case BCJ_ARM64:
+ filtered = bcj_arm64(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_RISCV
+ case BCJ_RISCV:
+ filtered = bcj_riscv(s, buf, size);
+ break;
+#endif
default:
/* Never reached but silence compiler warnings. */
filtered = 0;
@@ -414,9 +572,8 @@ static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
* data in chunks of 1-16 bytes. To hide this issue, this function does
* some buffering.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
- struct xz_dec_lzma2 *lzma2,
- struct xz_buf *b)
+enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b)
{
size_t out_start;
@@ -524,7 +681,7 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
return s->ret;
}
-XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
+struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
{
struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s != NULL)
@@ -533,7 +690,7 @@ XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
return s;
}
-XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
+enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
{
switch (id) {
#ifdef XZ_DEC_X86
@@ -554,6 +711,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
#ifdef XZ_DEC_SPARC
case BCJ_SPARC:
#endif
+#ifdef XZ_DEC_ARM64
+ case BCJ_ARM64:
+#endif
+#ifdef XZ_DEC_RISCV
+ case BCJ_RISCV:
+#endif
break;
default:
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 27ce34520e78..83bb66b6016d 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* LZMA2 decoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -961,8 +960,7 @@ static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
* Take care of the LZMA2 control layer, and forward the job of actual LZMA
* decoding or copying of uncompressed chunks to other functions.
*/
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
- struct xz_buf *b)
+enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
uint32_t tmp;
@@ -1138,8 +1136,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
return XZ_OK;
}
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
- uint32_t dict_max)
+struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
@@ -1162,7 +1159,7 @@ XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
return s;
}
-XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
+enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
{
/* This limits dictionary size to 3 GiB to keep parsing simpler. */
if (props > 39)
@@ -1198,7 +1195,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
return XZ_OK;
}
-XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
+void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
if (DEC_IS_MULTI(s->dict.mode))
vfree(s->dict.buf);
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index 683570b93a8c..f9d003684d56 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* .xz Stream decoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include "xz_private.h"
@@ -747,7 +746,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
* actually succeeds (that's the price we pay for using the output buffer
* as the workspace).
*/
-XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
{
size_t in_start;
size_t out_start;
@@ -783,7 +782,7 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
return ret;
}
-XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
@@ -813,7 +812,7 @@ error_bcj:
return NULL;
}
-XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+void xz_dec_reset(struct xz_dec *s)
{
s->sequence = SEQ_STREAM_HEADER;
s->allow_buf_error = false;
@@ -825,7 +824,7 @@ XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
s->temp.size = STREAM_HEADER_SIZE;
}
-XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+void xz_dec_end(struct xz_dec *s)
{
if (s != NULL) {
xz_dec_lzma2_end(s->lzma2);
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
index 61098c67a413..f40817d65897 100644
--- a/lib/xz/xz_dec_syms.c
+++ b/lib/xz/xz_dec_syms.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* XZ decoder module information
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include <linux/module.h>
@@ -23,11 +22,6 @@ EXPORT_SYMBOL(xz_dec_microlzma_end);
#endif
MODULE_DESCRIPTION("XZ decompressor");
-MODULE_VERSION("1.1");
+MODULE_VERSION("1.2");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
-
-/*
- * This code is in the public domain, but in Linux it's simplest to just
- * say it's GPL and consider the authors as the copyright holders.
- */
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c
index da28a19d6c98..53d3600f2ddb 100644
--- a/lib/xz/xz_dec_test.c
+++ b/lib/xz/xz_dec_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: 0BSD
+
/*
* XZ decoder tester
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#include <linux/kernel.h>
@@ -212,9 +211,4 @@ module_exit(xz_dec_test_exit);
MODULE_DESCRIPTION("XZ decompressor tester");
MODULE_VERSION("1.0");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
-
-/*
- * This code is in the public domain, but in Linux it's simplest to just
- * say it's GPL and consider the authors as the copyright holders.
- */
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
index 92d852d4f87a..d2632b7dfb9c 100644
--- a/lib/xz/xz_lzma2.h
+++ b/lib/xz/xz_lzma2.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* LZMA2 definitions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_LZMA2_H
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index bf1e94ec7873..5f1294a1408c 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Private includes and definitions
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_PRIVATE_H
@@ -37,6 +36,12 @@
# ifdef CONFIG_XZ_DEC_SPARC
# define XZ_DEC_SPARC
# endif
+# ifdef CONFIG_XZ_DEC_ARM64
+# define XZ_DEC_ARM64
+# endif
+# ifdef CONFIG_XZ_DEC_RISCV
+# define XZ_DEC_RISCV
+# endif
# ifdef CONFIG_XZ_DEC_MICROLZMA
# define XZ_DEC_MICROLZMA
# endif
@@ -98,23 +103,19 @@
*/
#ifndef XZ_DEC_BCJ
# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
- || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+ || defined(XZ_DEC_IA64) \
|| defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \
- || defined(XZ_DEC_SPARC)
+ || defined(XZ_DEC_SPARC) || defined(XZ_DEC_ARM64) \
+ || defined(XZ_DEC_RISCV)
# define XZ_DEC_BCJ
# endif
#endif
-#ifndef CRC32_POLY_LE
-#define CRC32_POLY_LE 0xedb88320
-#endif
-
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
*/
-XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
- uint32_t dict_max);
+struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max);
/*
* Decode the LZMA2 properties (one byte) and reset the decoder. Return
@@ -122,22 +123,20 @@ XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
* big enough, and XZ_OPTIONS_ERROR if props indicates something that this
* decoder doesn't support.
*/
-XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
- uint8_t props);
+enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props);
/* Decode raw LZMA2 stream from b->in to b->out. */
-XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
- struct xz_buf *b);
+enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b);
/* Free the memory allocated for the LZMA2 decoder. */
-XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
#ifdef XZ_DEC_BCJ
/*
* Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
* calling xz_dec_bcj_run().
*/
-XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
/*
* Decode the Filter ID of a BCJ filter. This implementation doesn't
@@ -145,16 +144,15 @@ XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
* is needed. Returns XZ_OK if the given Filter ID is supported.
* Otherwise XZ_OPTIONS_ERROR is returned.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
/*
* Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
* a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
* must be called directly.
*/
-XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
- struct xz_dec_lzma2 *lzma2,
- struct xz_buf *b);
+enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b);
/* Free the memory allocated for the BCJ filters. */
#define xz_dec_bcj_end(s) kfree(s)
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
index 430bb3a0d195..55f9f6f94b78 100644
--- a/lib/xz/xz_stream.h
+++ b/lib/xz/xz_stream.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Definitions for handling the .xz file format
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_STREAM_H
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
index f620cafca633..16bb995bc6c4 100644
--- a/lib/zstd/compress/zstd_compress.c
+++ b/lib/zstd/compress/zstd_compress.c
@@ -4810,6 +4810,8 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
dictLoadMethod, cctxParams.cParams,
cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
customMem);
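+ /* The internal constructor returns NULL on failure. */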
+ if (!cdict)
+ return NULL;
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
index 04e1b5c01d9b..bd8784449b31 100644
--- a/lib/zstd/zstd_compress_module.c
+++ b/lib/zstd/zstd_compress_module.c
@@ -66,6 +66,12 @@ int zstd_max_clevel(void)
}
EXPORT_SYMBOL(zstd_max_clevel);
+int zstd_default_clevel(void)
+{
+ return ZSTD_defaultCLevel();
+}
+EXPORT_SYMBOL(zstd_default_clevel);
+
size_t zstd_compress_bound(size_t src_size)
{
return ZSTD_compressBound(src_size);
@@ -79,6 +85,13 @@ zstd_parameters zstd_get_params(int level,
}
EXPORT_SYMBOL(zstd_get_params);
+zstd_compression_parameters zstd_get_cparams(int level,
+ unsigned long long estimated_src_size, size_t dict_size)
+{
+ return ZSTD_getCParams(level, estimated_src_size, dict_size);
+}
+EXPORT_SYMBOL(zstd_get_cparams);
+
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCCtxSize_usingCParams(*cparams);
@@ -93,6 +106,33 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
}
EXPORT_SYMBOL(zstd_init_cctx);
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem)
+{
+ return ZSTD_createCCtx_advanced(custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_cctx_advanced);
+
+size_t zstd_free_cctx(zstd_cctx *cctx)
+{
+ return ZSTD_freeCCtx(cctx);
+}
+EXPORT_SYMBOL(zstd_free_cctx);
+
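+/*
+ * The dictionary is used by reference (ZSTD_dlm_byRef): it is not copied,
+ * so the caller must keep the dict buffer valid for as long as the cdict
+ * is in use.
+ */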
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+ zstd_compression_parameters cparams,
+ zstd_custom_mem custom_mem)
+{
+ return ZSTD_createCDict_advanced(dict, dict_size, ZSTD_dlm_byRef,
+ ZSTD_dct_auto, cparams, custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_cdict_byreference);
+
+size_t zstd_free_cdict(zstd_cdict *cdict)
+{
+ return ZSTD_freeCDict(cdict);
+}
+EXPORT_SYMBOL(zstd_free_cdict);
+
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters)
{
@@ -101,6 +141,15 @@ size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
}
EXPORT_SYMBOL(zstd_compress_cctx);
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+ size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_cdict *cdict)
+{
+ return ZSTD_compress_usingCDict(cctx, dst, dst_capacity,
+ src, src_size, cdict);
+}
+EXPORT_SYMBOL(zstd_compress_using_cdict);
+
size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCStreamSize_usingCParams(*cparams);
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
index f4ed952ed485..469fc3059be0 100644
--- a/lib/zstd/zstd_decompress_module.c
+++ b/lib/zstd/zstd_decompress_module.c
@@ -44,6 +44,33 @@ size_t zstd_dctx_workspace_bound(void)
}
EXPORT_SYMBOL(zstd_dctx_workspace_bound);
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem)
+{
+ return ZSTD_createDCtx_advanced(custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_dctx_advanced);
+
+size_t zstd_free_dctx(zstd_dctx *dctx)
+{
+ return ZSTD_freeDCtx(dctx);
+}
+EXPORT_SYMBOL(zstd_free_dctx);
+
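+/*
+ * As with zstd_create_cdict_byreference(), the dict buffer is referenced
+ * rather than copied and must outlive the returned ddict.
+ */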
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+ zstd_custom_mem custom_mem)
+{
+ return ZSTD_createDDict_advanced(dict, dict_size, ZSTD_dlm_byRef,
+ ZSTD_dct_auto, custom_mem);
+}
+EXPORT_SYMBOL(zstd_create_ddict_byreference);
+
+size_t zstd_free_ddict(zstd_ddict *ddict)
+{
+ return ZSTD_freeDDict(ddict);
+}
+EXPORT_SYMBOL(zstd_free_ddict);
+
zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
@@ -59,6 +86,15 @@ size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
}
EXPORT_SYMBOL(zstd_decompress_dctx);
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+ void *dst, size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_ddict *ddict)
+{
+ return ZSTD_decompress_usingDDict(dctx, dst, dst_capacity, src,
+ src_size, ddict);
+}
+EXPORT_SYMBOL(zstd_decompress_using_ddict);
+
size_t zstd_dstream_workspace_bound(size_t max_window_size)
{
return ZSTD_estimateDStreamSize(max_window_size);