Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                                            |   7
-rw-r--r--  lib/Kconfig.debug                                      |   3
-rw-r--r--  lib/Kconfig.ubsan                                      |  29
-rw-r--r--  lib/Makefile                                           |   8
-rw-r--r--  lib/debugobjects.c                                     |   2
-rw-r--r--  lib/div64.c                                            |   6
-rw-r--r--  lib/iomap_copy.c                                       |  21
-rw-r--r--  lib/irq_poll.c                                         | 222
-rw-r--r--  lib/libcrc32c.c                                        |   2
-rw-r--r--  lib/lru_cache.c                                        |   4
-rw-r--r--  lib/ratelimit.c                                        |   2
-rw-r--r--  lib/string_helpers.c                                   |  63
-rw-r--r--  lib/strncpy_from_user.c                                |  11
-rw-r--r--  lib/strnlen_user.c                                     |  18
-rw-r--r--  lib/test_hexdump.c (renamed from lib/test-hexdump.c)   | 146
-rw-r--r--  lib/ubsan.c                                            | 456
-rw-r--r--  lib/ubsan.h                                            |  84
17 files changed, 1003 insertions, 81 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 5a0c1c83cdf0..133ebc0c1773 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -210,9 +210,11 @@ config RANDOM32_SELFTEST # compression support is select'ed if needed # config 842_COMPRESS + select CRC32 tristate config 842_DECOMPRESS + select CRC32 tristate config ZLIB_INFLATE @@ -475,6 +477,11 @@ config DDR information. This data is useful for drivers handling DDR SDRAM controllers. +config IRQ_POLL + bool "IRQ polling library" + help + Helper library to poll interrupt mitigation using polling. + config MPILIB tristate select CLZ_TAB diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f75a33f29f6e..ecb9e75614bf 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1893,6 +1893,8 @@ source "samples/Kconfig" source "lib/Kconfig.kgdb" +source "lib/Kconfig.ubsan" + config ARCH_HAS_DEVMEM_IS_ALLOWED bool @@ -1919,7 +1921,6 @@ config STRICT_DEVMEM config IO_STRICT_DEVMEM bool "Filter I/O access to /dev/mem" depends on STRICT_DEVMEM - default STRICT_DEVMEM ---help--- If this option is disabled, you allow userspace (root) access to all io-memory regardless of whether a driver is actively using that diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan new file mode 100644 index 000000000000..49518fb48cab --- /dev/null +++ b/lib/Kconfig.ubsan @@ -0,0 +1,29 @@ +config ARCH_HAS_UBSAN_SANITIZE_ALL + bool + +config UBSAN + bool "Undefined behaviour sanity checker" + help + This option enables undefined behaviour sanity checker + Compile-time instrumentation is used to detect various undefined + behaviours in runtime. Various types of checks may be enabled + via boot parameter ubsan_handle (see: Documentation/ubsan.txt). + +config UBSAN_SANITIZE_ALL + bool "Enable instrumentation for the entire kernel" + depends on UBSAN + depends on ARCH_HAS_UBSAN_SANITIZE_ALL + default y + help + This option activates instrumentation for the entire kernel. + If you don't enable this option, you have to explicitly specify + UBSAN_SANITIZE := y for the files/directories you want to check for UB. + +config UBSAN_ALIGNMENT + bool "Enable checking of pointers alignment" + depends on UBSAN + default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS + help + This option enables detection of unaligned memory accesses. + Enabling this option on architectures that support unalligned + accesses may produce a lot of false positives. 
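The UBSAN_SANITIZE_ALL help text above refers to kbuild's per-directory and per-object UBSAN switches. As a minimal sketch (the object names foo.o and bar.o are hypothetical; the convention is the same one the lib/Makefile hunk below uses to exclude ubsan.o itself), a subsystem Makefile could request or suppress instrumentation like this:

    # Instrument every object built in this directory when
    # CONFIG_UBSAN_SANITIZE_ALL is not enabled:
    UBSAN_SANITIZE := y

    # Instrument only a single (hypothetical) object:
    UBSAN_SANITIZE_foo.o := y

    # Exclude a hot-path object even when CONFIG_UBSAN_SANITIZE_ALL=y,
    # as lib/Makefile does for ubsan.o below:
    UBSAN_SANITIZE_bar.o := n
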
diff --git a/lib/Makefile b/lib/Makefile index 180dd4d0dd41..a7c26a41a738 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -31,7 +31,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += hexdump.o -obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o +obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o obj-y += kstrtox.o obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o @@ -154,7 +154,7 @@ obj-$(CONFIG_GLOB) += glob.o obj-$(CONFIG_MPILIB) += mpi/ obj-$(CONFIG_SIGNATURE) += digsig.o -obj-$(CONFIG_CLZ_TAB) += clz_tab.o +lib-$(CONFIG_CLZ_TAB) += clz_tab.o obj-$(CONFIG_DDR) += jedec_ddr_data.o @@ -165,6 +165,7 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o obj-$(CONFIG_SG_SPLIT) += sg_split.o obj-$(CONFIG_STMP_DEVICE) += stmp_device.o +obj-$(CONFIG_IRQ_POLL) += irq_poll.o libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ fdt_empty_tree.o @@ -209,3 +210,6 @@ quiet_cmd_build_OID_registry = GEN $@ clean-files += oid_registry_data.c obj-$(CONFIG_UCS2_STRING) += ucs2_string.o +obj-$(CONFIG_UBSAN) += ubsan.o + +UBSAN_SANITIZE_ubsan.o := n diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 547f7f923dbc..519b5a10fd70 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -21,7 +21,7 @@ #define ODEBUG_HASH_BITS 14 #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) -#define ODEBUG_POOL_SIZE 512 +#define ODEBUG_POOL_SIZE 1024 #define ODEBUG_POOL_MIN_LEVEL 256 #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT diff --git a/lib/div64.c b/lib/div64.c index 62a698a432bc..7f345259c32f 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -13,7 +13,8 @@ * * Code generated for this function might be very inefficient * for some CPUs. __div64_32() can be overridden by linking arch-specific - * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. + * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S + * or by defining a preprocessor macro in arch/include/asm/div64.h. */ #include <linux/export.h> @@ -23,6 +24,7 @@ /* Not needed on 64bit architectures */ #if BITS_PER_LONG == 32 +#ifndef __div64_32 uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) { uint64_t rem = *n; @@ -55,8 +57,8 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) *n = res; return rem; } - EXPORT_SYMBOL(__div64_32); +#endif #ifndef div_s64_rem s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c index 4527e751b5e0..b8f1d6cbb200 100644 --- a/lib/iomap_copy.c +++ b/lib/iomap_copy.c @@ -42,6 +42,27 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to, EXPORT_SYMBOL_GPL(__iowrite32_copy); /** + * __ioread32_copy - copy data from MMIO space, in 32-bit units + * @to: destination (must be 32-bit aligned) + * @from: source, in MMIO space (must be 32-bit aligned) + * @count: number of 32-bit quantities to copy + * + * Copy data from MMIO space to kernel space, in units of 32 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. 
+ */ +void __ioread32_copy(void *to, const void __iomem *from, size_t count) +{ + u32 *dst = to; + const u32 __iomem *src = from; + const u32 __iomem *end = src + count; + + while (src < end) + *dst++ = __raw_readl(src++); +} +EXPORT_SYMBOL_GPL(__ioread32_copy); + +/** * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units * @to: destination, in MMIO space (must be 64-bit aligned) * @from: source (must be 64-bit aligned) diff --git a/lib/irq_poll.c b/lib/irq_poll.c new file mode 100644 index 000000000000..836f7db4e548 --- /dev/null +++ b/lib/irq_poll.c @@ -0,0 +1,222 @@ +/* + * Functions related to interrupt-poll handling in the block layer. This + * is similar to NAPI for network devices. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bio.h> +#include <linux/interrupt.h> +#include <linux/cpu.h> +#include <linux/irq_poll.h> +#include <linux/delay.h> + +static unsigned int irq_poll_budget __read_mostly = 256; + +static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); + +/** + * irq_poll_sched - Schedule a run of the iopoll handler + * @iop: The parent iopoll structure + * + * Description: + * Add this irq_poll structure to the pending poll list and trigger the + * raise of the blk iopoll softirq. + **/ +void irq_poll_sched(struct irq_poll *iop) +{ + unsigned long flags; + + if (test_bit(IRQ_POLL_F_DISABLE, &iop->state)) + return; + if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state)) + return; + + local_irq_save(flags); + list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + local_irq_restore(flags); +} +EXPORT_SYMBOL(irq_poll_sched); + +/** + * __irq_poll_complete - Mark this @iop as un-polled again + * @iop: The parent iopoll structure + * + * Description: + * See irq_poll_complete(). This function must be called with interrupts + * disabled. + **/ +static void __irq_poll_complete(struct irq_poll *iop) +{ + list_del(&iop->list); + smp_mb__before_atomic(); + clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state); +} + +/** + * irq_poll_complete - Mark this @iop as un-polled again + * @iop: The parent iopoll structure + * + * Description: + * If a driver consumes less than the assigned budget in its run of the + * iopoll handler, it'll end the polled mode by calling this function. The + * iopoll handler will not be invoked again before irq_poll_sched() + * is called. + **/ +void irq_poll_complete(struct irq_poll *iop) +{ + unsigned long flags; + + local_irq_save(flags); + __irq_poll_complete(iop); + local_irq_restore(flags); +} +EXPORT_SYMBOL(irq_poll_complete); + +static void irq_poll_softirq(struct softirq_action *h) +{ + struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll); + int rearm = 0, budget = irq_poll_budget; + unsigned long start_time = jiffies; + + local_irq_disable(); + + while (!list_empty(list)) { + struct irq_poll *iop; + int work, weight; + + /* + * If softirq window is exhausted then punt. + */ + if (budget <= 0 || time_after(jiffies, start_time)) { + rearm = 1; + break; + } + + local_irq_enable(); + + /* Even though interrupts have been re-enabled, this + * access is safe because interrupts can only add new + * entries to the tail of this list, and only ->poll() + * calls can remove this head entry from the list. 
+ */ + iop = list_entry(list->next, struct irq_poll, list); + + weight = iop->weight; + work = 0; + if (test_bit(IRQ_POLL_F_SCHED, &iop->state)) + work = iop->poll(iop, weight); + + budget -= work; + + local_irq_disable(); + + /* + * Drivers must not modify the iopoll state, if they + * consume their assigned weight (or more, some drivers can't + * easily just stop processing, they have to complete an + * entire mask of commands).In such cases this code + * still "owns" the iopoll instance and therefore can + * move the instance around on the list at-will. + */ + if (work >= weight) { + if (test_bit(IRQ_POLL_F_DISABLE, &iop->state)) + __irq_poll_complete(iop); + else + list_move_tail(&iop->list, list); + } + } + + if (rearm) + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + + local_irq_enable(); +} + +/** + * irq_poll_disable - Disable iopoll on this @iop + * @iop: The parent iopoll structure + * + * Description: + * Disable io polling and wait for any pending callbacks to have completed. + **/ +void irq_poll_disable(struct irq_poll *iop) +{ + set_bit(IRQ_POLL_F_DISABLE, &iop->state); + while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state)) + msleep(1); + clear_bit(IRQ_POLL_F_DISABLE, &iop->state); +} +EXPORT_SYMBOL(irq_poll_disable); + +/** + * irq_poll_enable - Enable iopoll on this @iop + * @iop: The parent iopoll structure + * + * Description: + * Enable iopoll on this @iop. Note that the handler run will not be + * scheduled, it will only mark it as active. + **/ +void irq_poll_enable(struct irq_poll *iop) +{ + BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state)); + smp_mb__before_atomic(); + clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state); +} +EXPORT_SYMBOL(irq_poll_enable); + +/** + * irq_poll_init - Initialize this @iop + * @iop: The parent iopoll structure + * @weight: The default weight (or command completion budget) + * @poll_fn: The handler to invoke + * + * Description: + * Initialize and enable this irq_poll structure. 
+ **/ +void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn) +{ + memset(iop, 0, sizeof(*iop)); + INIT_LIST_HEAD(&iop->list); + iop->weight = weight; + iop->poll = poll_fn; +} +EXPORT_SYMBOL(irq_poll_init); + +static int irq_poll_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + /* + * If a CPU goes away, splice its entries to the current CPU + * and trigger a run of the softirq + */ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + int cpu = (unsigned long) hcpu; + + local_irq_disable(); + list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), + this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + local_irq_enable(); + } + + return NOTIFY_OK; +} + +static struct notifier_block irq_poll_cpu_notifier = { + .notifier_call = irq_poll_cpu_notify, +}; + +static __init int irq_poll_setup(void) +{ + int i; + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); + + open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq); + register_hotcpu_notifier(&irq_poll_cpu_notifier); + return 0; +} +subsys_initcall(irq_poll_setup); diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 6a08ce7d6adc..74a54b7f2562 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c @@ -36,6 +36,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/crc32c.h> static struct crypto_shash *tfm; @@ -74,3 +75,4 @@ module_exit(libcrc32c_mod_fini); MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: crc32c"); diff --git a/lib/lru_cache.c b/lib/lru_cache.c index 028f5d996eef..28ba40b99337 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c @@ -238,7 +238,7 @@ void lc_reset(struct lru_cache *lc) * @seq: the seq_file to print into * @lc: the lru cache to print statistics of */ -size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) +void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) { /* NOTE: * total calls to lc_get are @@ -250,8 +250,6 @@ size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n", lc->name, lc->used, lc->nr_elements, lc->hits, lc->misses, lc->starving, lc->locked, lc->changed); - - return 0; } static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 40e03ea2a967..2c5de86460c5 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c @@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) if (rs->missed) printk(KERN_WARNING "%s: %d callbacks suppressed\n", func, rs->missed); - rs->begin = 0; + rs->begin = jiffies; rs->printed = 0; rs->missed = 0; } diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 5939f63d90cd..5c88204b6f1f 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, [STRING_UNITS_10] = 1000, [STRING_UNITS_2] = 1024, }; - int i, j; - u32 remainder = 0, sf_cap, exp; + static const unsigned int rounding[] = { 500, 50, 5 }; + int i = 0, j; + u32 remainder = 0, sf_cap; char tmp[8]; const char *unit; tmp[0] = '\0'; - i = 0; - if (!size) + + if (blk_size == 0) + size = 0; + if (size == 0) goto out; - while (blk_size >= divisor[units]) { - remainder = do_div(blk_size, divisor[units]); + /* This is Napier's algorithm. 
Reduce the original block size to + * + * coefficient * divisor[units]^i + * + * we do the reduction so both coefficients are just under 32 bits so + * that multiplying them together won't overflow 64 bits and we keep + * as much precision as possible in the numbers. + * + * Note: it's safe to throw away the remainders here because all the + * precision is in the coefficients. + */ + while (blk_size >> 32) { + do_div(blk_size, divisor[units]); i++; } - exp = divisor[units] / (u32)blk_size; - /* - * size must be strictly greater than exp here to ensure that remainder - * is greater than divisor[units] coming out of the if below. - */ - if (size > exp) { - remainder = do_div(size, divisor[units]); - remainder *= blk_size; + while (size >> 32) { + do_div(size, divisor[units]); i++; - } else { - remainder *= size; } + /* now perform the actual multiplication keeping i as the sum of the + * two logarithms */ size *= blk_size; - size += remainder / divisor[units]; - remainder %= divisor[units]; + /* and logarithmically reduce it until it's just under the divisor */ while (size >= divisor[units]) { remainder = do_div(size, divisor[units]); i++; } + /* work out in j how many digits of precision we need from the + * remainder */ sf_cap = size; for (j = 0; sf_cap*10 < 1000; j++) sf_cap *= 10; - if (j) { + if (units == STRING_UNITS_2) { + /* express the remainder as a decimal. It's currently the + * numerator of a fraction whose denominator is + * divisor[units], which is 1 << 10 for STRING_UNITS_2 */ remainder *= 1000; - remainder /= divisor[units]; + remainder >>= 10; + } + + /* add a 5 to the digit below what will be printed to ensure + * an arithmetical round up and carry it through to size */ + remainder += rounding[j]; + if (remainder >= 1000) { + remainder -= 1000; + size += 1; + } + + if (j) { snprintf(tmp, sizeof(tmp), ".%03u", remainder); tmp[j+1] = '\0'; } diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index e0af6ff73d14..33840324138c 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -39,7 +39,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long unsigned long c, data; /* Fall back to byte-at-a-time if we get a page fault */ - if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) + if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) break; *(unsigned long *)(dst+res) = c; if (has_zero(c, &data, &constants)) { @@ -55,7 +55,7 @@ byte_at_a_time: while (max) { char c; - if (unlikely(__get_user(c,src+res))) + if (unlikely(unsafe_get_user(c,src+res))) return -EFAULT; dst[res] = c; if (!c) @@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count) src_addr = (unsigned long)src; if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; - return do_strncpy_from_user(dst, src, count, max); + long retval; + + user_access_begin(); + retval = do_strncpy_from_user(dst, src, count, max); + user_access_end(); + return retval; } return -EFAULT; } diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 3a5f2b366d84..2625943625d7 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -45,7 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, src -= align; max += align; - if (unlikely(__get_user(c,(unsigned long __user *)src))) + if (unlikely(unsafe_get_user(c,(unsigned long __user *)src))) return 0; c |= aligned_byte_mask(align); @@ -61,7 +61,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, 
if (unlikely(max <= sizeof(unsigned long))) break; max -= sizeof(unsigned long); - if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) + if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) return 0; } res -= align; @@ -112,7 +112,12 @@ long strnlen_user(const char __user *str, long count) src_addr = (unsigned long)str; if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; - return do_strnlen_user(str, count, max); + long retval; + + user_access_begin(); + retval = do_strnlen_user(str, count, max); + user_access_end(); + return retval; } return 0; } @@ -141,7 +146,12 @@ long strlen_user(const char __user *str) src_addr = (unsigned long)str; if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; - return do_strnlen_user(str, ~0ul, max); + long retval; + + user_access_begin(); + retval = do_strnlen_user(str, ~0ul, max); + user_access_end(); + return retval; } return 0; } diff --git a/lib/test-hexdump.c b/lib/test_hexdump.c index 5241df36eedf..3f415d8101f3 100644 --- a/lib/test-hexdump.c +++ b/lib/test_hexdump.c @@ -42,19 +42,21 @@ static const char * const test_data_8_le[] __initconst = { "e9ac0f9cad319ca6", "0cafb1439919d14c", }; -static void __init test_hexdump(size_t len, int rowsize, int groupsize, - bool ascii) +#define FILL_CHAR '#' + +static unsigned total_tests __initdata; +static unsigned failed_tests __initdata; + +static void __init test_hexdump_prepare_test(size_t len, int rowsize, + int groupsize, char *test, + size_t testlen, bool ascii) { - char test[32 * 3 + 2 + 32 + 1]; - char real[32 * 3 + 2 + 32 + 1]; char *p; const char * const *result; size_t l = len; int gs = groupsize, rs = rowsize; unsigned int i; - hex_dump_to_buffer(data_b, l, rs, gs, real, sizeof(real), ascii); - if (rs != 16 && rs != 32) rs = 16; @@ -73,8 +75,6 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize, else result = test_data_1_le; - memset(test, ' ', sizeof(test)); - /* hex dump */ p = test; for (i = 0; i < l / gs; i++) { @@ -82,24 +82,49 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize, size_t amount = strlen(q); strncpy(p, q, amount); - p += amount + 1; + p += amount; + + *p++ = ' '; } if (i) p--; /* ASCII part */ if (ascii) { - p = test + rs * 2 + rs / gs + 1; + do { + *p++ = ' '; + } while (p < test + rs * 2 + rs / gs + 1); + strncpy(p, data_a, l); p += l; } *p = '\0'; +} - if (strcmp(test, real)) { +#define TEST_HEXDUMP_BUF_SIZE (32 * 3 + 2 + 32 + 1) + +static void __init test_hexdump(size_t len, int rowsize, int groupsize, + bool ascii) +{ + char test[TEST_HEXDUMP_BUF_SIZE]; + char real[TEST_HEXDUMP_BUF_SIZE]; + + total_tests++; + + memset(real, FILL_CHAR, sizeof(real)); + hex_dump_to_buffer(data_b, len, rowsize, groupsize, real, sizeof(real), + ascii); + + memset(test, FILL_CHAR, sizeof(test)); + test_hexdump_prepare_test(len, rowsize, groupsize, test, sizeof(test), + ascii); + + if (memcmp(test, real, TEST_HEXDUMP_BUF_SIZE)) { pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); pr_err("Result: '%s'\n", real); pr_err("Expect: '%s'\n", test); + failed_tests++; } } @@ -114,52 +139,72 @@ static void __init test_hexdump_set(int rowsize, bool ascii) test_hexdump(len, rowsize, 1, ascii); } -static void __init test_hexdump_overflow(bool ascii) +static void __init test_hexdump_overflow(size_t buflen, size_t len, + int rowsize, int groupsize, + bool ascii) { - char buf[56]; - const char *t = test_data_1_le[0]; - size_t l = get_random_int() % sizeof(buf); + char 
test[TEST_HEXDUMP_BUF_SIZE]; + char buf[TEST_HEXDUMP_BUF_SIZE]; + int rs = rowsize, gs = groupsize; + int ae, he, e, f, r; bool a; - int e, r; - memset(buf, ' ', sizeof(buf)); + total_tests++; + + memset(buf, FILL_CHAR, sizeof(buf)); - r = hex_dump_to_buffer(data_b, 1, 16, 1, buf, l, ascii); + r = hex_dump_to_buffer(data_b, len, rs, gs, buf, buflen, ascii); + + /* + * Caller must provide the data length multiple of groupsize. The + * calculations below are made with that assumption in mind. + */ + ae = rs * 2 /* hex */ + rs / gs /* spaces */ + 1 /* space */ + len /* ascii */; + he = (gs * 2 /* hex */ + 1 /* space */) * len / gs - 1 /* no trailing space */; if (ascii) - e = 50; + e = ae; else - e = 2; - buf[e + 2] = '\0'; - - if (!l) { - a = r == e && buf[0] == ' '; - } else if (l < 3) { - a = r == e && buf[0] == '\0'; - } else if (l < 4) { - a = r == e && !strcmp(buf, t); - } else if (ascii) { - if (l < 51) - a = r == e && buf[l - 1] == '\0' && buf[l - 2] == ' '; - else - a = r == e && buf[50] == '\0' && buf[49] == '.'; - } else { - a = r == e && buf[e] == '\0'; + e = he; + + f = min_t(int, e + 1, buflen); + if (buflen) { + test_hexdump_prepare_test(len, rs, gs, test, sizeof(test), ascii); + test[f - 1] = '\0'; } + memset(test + f, FILL_CHAR, sizeof(test) - f); + + a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE); + + buf[sizeof(buf) - 1] = '\0'; if (!a) { - pr_err("Len: %zu rc: %u strlen: %zu\n", l, r, strlen(buf)); - pr_err("Result: '%s'\n", buf); + pr_err("Len: %zu buflen: %zu strlen: %zu\n", + len, buflen, strnlen(buf, sizeof(buf))); + pr_err("Result: %d '%s'\n", r, buf); + pr_err("Expect: %d '%s'\n", e, test); + failed_tests++; } } +static void __init test_hexdump_overflow_set(size_t buflen, bool ascii) +{ + unsigned int i = 0; + int rs = (get_random_int() % 2 + 1) * 16; + + do { + int gs = 1 << i; + size_t len = get_random_int() % rs + gs; + + test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii); + } while (i++ < 3); +} + static int __init test_hexdump_init(void) { unsigned int i; int rowsize; - pr_info("Running tests...\n"); - rowsize = (get_random_int() % 2 + 1) * 16; for (i = 0; i < 16; i++) test_hexdump_set(rowsize, false); @@ -168,13 +213,26 @@ static int __init test_hexdump_init(void) for (i = 0; i < 16; i++) test_hexdump_set(rowsize, true); - for (i = 0; i < 16; i++) - test_hexdump_overflow(false); + for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) + test_hexdump_overflow_set(i, false); - for (i = 0; i < 16; i++) - test_hexdump_overflow(true); + for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) + test_hexdump_overflow_set(i, true); + + if (failed_tests == 0) + pr_info("all %u tests passed\n", total_tests); + else + pr_err("failed %u out of %u tests\n", failed_tests, total_tests); - return -EINVAL; + return failed_tests ? -EINVAL : 0; } module_init(test_hexdump_init); + +static void __exit test_hexdump_exit(void) +{ + /* do nothing */ +} +module_exit(test_hexdump_exit); + +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/ubsan.c b/lib/ubsan.c new file mode 100644 index 000000000000..8799ae5e2e42 --- /dev/null +++ b/lib/ubsan.c @@ -0,0 +1,456 @@ +/* + * UBSAN error reporting functions + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/ctype.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/sched.h> + +#include "ubsan.h" + +const char *type_check_kinds[] = { + "load of", + "store to", + "reference binding to", + "member access within", + "member call on", + "constructor call on", + "downcast of", + "downcast of" +}; + +#define REPORTED_BIT 31 + +#if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN) +#define COLUMN_MASK (~(1U << REPORTED_BIT)) +#define LINE_MASK (~0U) +#else +#define COLUMN_MASK (~0U) +#define LINE_MASK (~(1U << REPORTED_BIT)) +#endif + +#define VALUE_LENGTH 40 + +static bool was_reported(struct source_location *location) +{ + return test_and_set_bit(REPORTED_BIT, &location->reported); +} + +static void print_source_location(const char *prefix, + struct source_location *loc) +{ + pr_err("%s %s:%d:%d\n", prefix, loc->file_name, + loc->line & LINE_MASK, loc->column & COLUMN_MASK); +} + +static bool suppress_report(struct source_location *loc) +{ + return current->in_ubsan || was_reported(loc); +} + +static bool type_is_int(struct type_descriptor *type) +{ + return type->type_kind == type_kind_int; +} + +static bool type_is_signed(struct type_descriptor *type) +{ + WARN_ON(!type_is_int(type)); + return type->type_info & 1; +} + +static unsigned type_bit_width(struct type_descriptor *type) +{ + return 1 << (type->type_info >> 1); +} + +static bool is_inline_int(struct type_descriptor *type) +{ + unsigned inline_bits = sizeof(unsigned long)*8; + unsigned bits = type_bit_width(type); + + WARN_ON(!type_is_int(type)); + + return bits <= inline_bits; +} + +static s_max get_signed_val(struct type_descriptor *type, unsigned long val) +{ + if (is_inline_int(type)) { + unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); + return ((s_max)val) << extra_bits >> extra_bits; + } + + if (type_bit_width(type) == 64) + return *(s64 *)val; + + return *(s_max *)val; +} + +static bool val_is_negative(struct type_descriptor *type, unsigned long val) +{ + return type_is_signed(type) && get_signed_val(type, val) < 0; +} + +static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) +{ + if (is_inline_int(type)) + return val; + + if (type_bit_width(type) == 64) + return *(u64 *)val; + + return *(u_max *)val; +} + +static void val_to_string(char *str, size_t size, struct type_descriptor *type, + unsigned long value) +{ + if (type_is_int(type)) { + if (type_bit_width(type) == 128) { +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + u_max val = get_unsigned_val(type, value); + + scnprintf(str, size, "0x%08x%08x%08x%08x", + (u32)(val >> 96), + (u32)(val >> 64), + (u32)(val >> 32), + (u32)(val)); +#else + WARN_ON(1); +#endif + } else if (type_is_signed(type)) { + scnprintf(str, size, "%lld", + (s64)get_signed_val(type, value)); + } else { + scnprintf(str, size, "%llu", + (u64)get_unsigned_val(type, value)); + } + } +} + +static bool location_is_valid(struct source_location *loc) +{ + return loc->file_name != NULL; +} + +static DEFINE_SPINLOCK(report_lock); + +static void ubsan_prologue(struct source_location *location, + unsigned long *flags) +{ + current->in_ubsan++; + spin_lock_irqsave(&report_lock, *flags); + + pr_err("========================================" + "========================================\n"); + print_source_location("UBSAN: Undefined behaviour in", location); +} + +static void ubsan_epilogue(unsigned long *flags) +{ + dump_stack(); + 
pr_err("========================================" + "========================================\n"); + spin_unlock_irqrestore(&report_lock, *flags); + current->in_ubsan--; +} + +static void handle_overflow(struct overflow_data *data, unsigned long lhs, + unsigned long rhs, char op) +{ + + struct type_descriptor *type = data->type; + unsigned long flags; + char lhs_val_str[VALUE_LENGTH]; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); + val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); + pr_err("%s integer overflow:\n", + type_is_signed(type) ? "signed" : "unsigned"); + pr_err("%s %c %s cannot be represented in type %s\n", + lhs_val_str, + op, + rhs_val_str, + type->type_name); + + ubsan_epilogue(&flags); +} + +void __ubsan_handle_add_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + + handle_overflow(data, lhs, rhs, '+'); +} +EXPORT_SYMBOL(__ubsan_handle_add_overflow); + +void __ubsan_handle_sub_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + handle_overflow(data, lhs, rhs, '-'); +} +EXPORT_SYMBOL(__ubsan_handle_sub_overflow); + +void __ubsan_handle_mul_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + handle_overflow(data, lhs, rhs, '*'); +} +EXPORT_SYMBOL(__ubsan_handle_mul_overflow); + +void __ubsan_handle_negate_overflow(struct overflow_data *data, + unsigned long old_val) +{ + unsigned long flags; + char old_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); + + pr_err("negation of %s cannot be represented in type %s:\n", + old_val_str, data->type->type_name); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_negate_overflow); + + +void __ubsan_handle_divrem_overflow(struct overflow_data *data, + unsigned long lhs, + unsigned long rhs) +{ + unsigned long flags; + char rhs_val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); + + if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1) + pr_err("division of %s by -1 cannot be represented in type %s\n", + rhs_val_str, data->type->type_name); + else + pr_err("division by zero\n"); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); + +static void handle_null_ptr_deref(struct type_mismatch_data *data) +{ + unsigned long flags; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + pr_err("%s null pointer of type %s\n", + type_check_kinds[data->type_check_kind], + data->type->type_name); + + ubsan_epilogue(&flags); +} + +static void handle_missaligned_access(struct type_mismatch_data *data, + unsigned long ptr) +{ + unsigned long flags; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + pr_err("%s misaligned address %p for type %s\n", + type_check_kinds[data->type_check_kind], + (void *)ptr, data->type->type_name); + pr_err("which requires %ld byte alignment\n", data->alignment); + + ubsan_epilogue(&flags); +} + +static void handle_object_size_mismatch(struct type_mismatch_data *data, + unsigned long ptr) +{ + unsigned long flags; + + if 
(suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + pr_err("%s address %pk with insufficient space\n", + type_check_kinds[data->type_check_kind], + (void *) ptr); + pr_err("for an object of type %s\n", data->type->type_name); + ubsan_epilogue(&flags); +} + +void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, + unsigned long ptr) +{ + + if (!ptr) + handle_null_ptr_deref(data); + else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) + handle_missaligned_access(data, ptr); + else + handle_object_size_mismatch(data, ptr); +} +EXPORT_SYMBOL(__ubsan_handle_type_mismatch); + +void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) +{ + unsigned long flags; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + pr_err("null pointer returned from function declared to never return null\n"); + + if (location_is_valid(&data->attr_location)) + print_source_location("returns_nonnull attribute specified in", + &data->attr_location); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_nonnull_return); + +void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, + unsigned long bound) +{ + unsigned long flags; + char bound_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(bound_str, sizeof(bound_str), data->type, bound); + pr_err("variable length array bound value %s <= 0\n", bound_str); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); + +void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, + unsigned long index) +{ + unsigned long flags; + char index_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(index_str, sizeof(index_str), data->index_type, index); + pr_err("index %s is out of range for type %s\n", index_str, + data->array_type->type_name); + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); + +void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, + unsigned long lhs, unsigned long rhs) +{ + unsigned long flags; + struct type_descriptor *rhs_type = data->rhs_type; + struct type_descriptor *lhs_type = data->lhs_type; + char rhs_str[VALUE_LENGTH]; + char lhs_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); + val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); + + if (val_is_negative(rhs_type, rhs)) + pr_err("shift exponent %s is negative\n", rhs_str); + + else if (get_unsigned_val(rhs_type, rhs) >= + type_bit_width(lhs_type)) + pr_err("shift exponent %s is too large for %u-bit type %s\n", + rhs_str, + type_bit_width(lhs_type), + lhs_type->type_name); + else if (val_is_negative(lhs_type, lhs)) + pr_err("left shift of negative value %s\n", + lhs_str); + else + pr_err("left shift of %s by %s places cannot be" + " represented in type %s\n", + lhs_str, rhs_str, + lhs_type->type_name); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); + + +void __noreturn +__ubsan_handle_builtin_unreachable(struct unreachable_data *data) +{ + unsigned long flags; + + ubsan_prologue(&data->location, &flags); + pr_err("calling __builtin_unreachable()\n"); + ubsan_epilogue(&flags); + panic("can't return from __builtin_unreachable()"); +} 
+EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); + +void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, + unsigned long val) +{ + unsigned long flags; + char val_str[VALUE_LENGTH]; + + if (suppress_report(&data->location)) + return; + + ubsan_prologue(&data->location, &flags); + + val_to_string(val_str, sizeof(val_str), data->type, val); + + pr_err("load of value %s is not a valid value for type %s\n", + val_str, data->type->type_name); + + ubsan_epilogue(&flags); +} +EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); diff --git a/lib/ubsan.h b/lib/ubsan.h new file mode 100644 index 000000000000..b2d18d4a53f5 --- /dev/null +++ b/lib/ubsan.h @@ -0,0 +1,84 @@ +#ifndef _LIB_UBSAN_H +#define _LIB_UBSAN_H + +enum { + type_kind_int = 0, + type_kind_float = 1, + type_unknown = 0xffff +}; + +struct type_descriptor { + u16 type_kind; + u16 type_info; + char type_name[1]; +}; + +struct source_location { + const char *file_name; + union { + unsigned long reported; + struct { + u32 line; + u32 column; + }; + }; +}; + +struct overflow_data { + struct source_location location; + struct type_descriptor *type; +}; + +struct type_mismatch_data { + struct source_location location; + struct type_descriptor *type; + unsigned long alignment; + unsigned char type_check_kind; +}; + +struct nonnull_arg_data { + struct source_location location; + struct source_location attr_location; + int arg_index; +}; + +struct nonnull_return_data { + struct source_location location; + struct source_location attr_location; +}; + +struct vla_bound_data { + struct source_location location; + struct type_descriptor *type; +}; + +struct out_of_bounds_data { + struct source_location location; + struct type_descriptor *array_type; + struct type_descriptor *index_type; +}; + +struct shift_out_of_bounds_data { + struct source_location location; + struct type_descriptor *lhs_type; + struct type_descriptor *rhs_type; +}; + +struct unreachable_data { + struct source_location location; +}; + +struct invalid_value_data { + struct source_location location; + struct type_descriptor *type; +}; + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) +typedef __int128 s_max; +typedef unsigned __int128 u_max; +#else +typedef s64 s_max; +typedef u64 u_max; +#endif + +#endif |
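
The irq_poll library added in lib/irq_poll.c has no caller in this diff, so the following sketch illustrates the intended calling pattern under stated assumptions: every driver-side name (mydev, mydev_poll, the mask/unmask and completion helpers) is invented for illustration, while the irq_poll_* calls are exactly the ones exported above. The hard interrupt handler masks the device's interrupt source and schedules the poll instance; the poll softirq then invokes the callback, which drains completions until it either runs out of work and calls irq_poll_complete(), or consumes its full weight and stays on the per-CPU poll list.

    /* Hypothetical driver built on the irq_poll API introduced above. */
    #include <linux/interrupt.h>
    #include <linux/irq_poll.h>
    #include <linux/kernel.h>

    #define MYDEV_POLL_WEIGHT 32    /* completion budget per poll callback run */

    struct mydev {
            struct irq_poll iop;
            /* ... device state ... */
    };

    /* Invented stand-ins for real hardware access. */
    static void mydev_mask_irq(struct mydev *dev) { }
    static void mydev_unmask_irq(struct mydev *dev) { }
    static int mydev_reap_one_completion(struct mydev *dev) { return 0; }

    static int mydev_poll(struct irq_poll *iop, int budget)
    {
            struct mydev *dev = container_of(iop, struct mydev, iop);
            int done = 0;

            while (done < budget && mydev_reap_one_completion(dev))
                    done++;

            if (done < budget) {
                    /* Drained: leave polled mode and re-enable the interrupt. */
                    irq_poll_complete(iop);
                    mydev_unmask_irq(dev);
            }

            /* Returning the full budget keeps the instance on the poll list. */
            return done;
    }

    static irqreturn_t mydev_isr(int irq, void *data)
    {
            struct mydev *dev = data;

            /* Mask the source and defer the real work to the poll softirq. */
            mydev_mask_irq(dev);
            irq_poll_sched(&dev->iop);
            return IRQ_HANDLED;
    }

    static void mydev_setup_poll(struct mydev *dev)
    {
            irq_poll_init(&dev->iop, MYDEV_POLL_WEIGHT, mydev_poll);
    }

On teardown such a driver would call irq_poll_disable(&dev->iop), which waits for any pending poll callback to finish before the structure can be freed.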