Diffstat (limited to 'lib')
40 files changed, 2070 insertions, 789 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3958dc4389f9..fe01d418b09a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -404,4 +404,7 @@ config OID_REGISTRY
 	help
 	  Enable fast lookup object identifier registry.
 
+config UCS2_STRING
+	tristate
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4a7f808fa06..566cf2bc08ea 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -674,7 +674,7 @@ config STACKTRACE
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
-	depends on DEBUG_KERNEL && !IA64 && !PARISC
+	depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG
 	help
 	  Enables the display of the minimum amount of free stack which each
 	  task has ever had available in the sysrq-T and sysrq-P debug output.
@@ -855,7 +855,7 @@ config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
 		(CRIS || M68K || FRV || UML || \
-		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
+		 AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
 	help
@@ -1192,7 +1192,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
 	  bash: echo: write error: Cannot allocate memory
 
 	  To compile this code as a module, choose M here: the module will
-	  be called pSeries-reconfig-notifier-error-inject.
+	  be called memory-notifier-error-inject.
 
 	  If unsure, say N.
@@ -1209,7 +1209,7 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
 	  notified, write the error code to "actions/<notifier event>/error".
 
 	  To compile this code as a module, choose M here: the module will
-	  be called memory-notifier-error-inject.
+	  be called of-reconfig-notifier-error-inject.
 
 	  If unsure, say N.
@@ -1292,6 +1292,24 @@ config LATENCYTOP
 	  Enable this option if you want to use the LatencyTOP tool
 	  to find out which userspace is blocking on what kernel operations.
 
+config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	bool
+
+config DEBUG_STRICT_USER_COPY_CHECKS
+	bool "Strict user copy size checks"
+	depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+	help
+	  Enabling this option turns a certain set of sanity checks for user
+	  copy operations into compile time failures.
+
+	  The copy_from_user() etc checks are there to help test if there
+	  are sufficient security checks on the length argument of
+	  the copy operation, by having gcc prove that the argument is
+	  within bounds.
+
+	  If unsure, say N.
+
 source mm/Kconfig.debug
 
 source kernel/trace/Kconfig
@@ -1463,5 +1481,8 @@ source "lib/Kconfig.kgdb"
 
 source "lib/Kconfig.kmemcheck"
 
+config TEST_STRING_HELPERS
+	tristate "Test functions located in the string_helpers module at runtime"
+
 config TEST_KSTRTOX
 	tristate "Test kstrto*() family of functions at runtime"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index dbb58ae1b8e0..140e87824173 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -80,4 +80,22 @@ config KDB_KEYBOARD
 	help
 	  KDB can use a PS/2 type keyboard for an input device
 
+config KDB_CONTINUE_CATASTROPHIC
+	int "KDB: continue after catastrophic errors"
+	depends on KGDB_KDB
+	default "0"
+	help
+	  This integer controls the behaviour of kdb when the kernel gets a
+	  catastrophic error, i.e. for a panic or oops.
+	  When KDB is active and a catastrophic error occurs, nothing extra
+	  will happen until you type 'go'.
+	  CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
+	  you type 'go', you will be warned by kdb. The second time you type
+	  'go', KDB tries to continue. No guarantees that the
+	  kernel is still usable in this situation.
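An aside on the DEBUG_STRICT_USER_COPY_CHECKS option added above (the KDB_CONTINUE_CATASTROPHIC help text continues below): a minimal sketch, under stated assumptions, of the pattern the option polices. demo_write(), kbuf and the sizes are invented for illustration. The explicit length clamp is what lets gcc prove the copy stays within bounds; with the strict option enabled, a copy whose length cannot be proven in bounds becomes a build failure rather than a warning.

	/* illustrative only, not part of this patch */
	#include <linux/uaccess.h>

	static ssize_t demo_write(const char __user *ubuf, size_t len)
	{
		char kbuf[64];

		if (len >= sizeof(kbuf))	/* makes the bound provable */
			return -EINVAL;
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;
		kbuf[len] = '\0';
		return len;
	}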
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue. + No guarantees that the kernel is still usable in this situation. + CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot. + If you are not sure, say 0. + endif # KGDB diff --git a/lib/Makefile b/lib/Makefile index 02ed6c04cd7d..e9c52e1b853a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -15,6 +15,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o +obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -22,8 +23,10 @@ lib-y += kobject.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ - bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o + gcd.o lcm.o list_sort.o uuid.o flex_array.o \ + bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o +obj-y += string_helpers.o +obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += kstrtox.o obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o @@ -174,3 +177,5 @@ quiet_cmd_build_OID_registry = GEN $@ cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@ clean-files += oid_registry_data.c + +obj-$(CONFIG_UCS2_STRING) += ucs2_string.o diff --git a/lib/argv_split.c b/lib/argv_split.c index 1e9a6cbc3689..e927ed0e18a8 100644 --- a/lib/argv_split.c +++ b/lib/argv_split.c @@ -8,23 +8,17 @@ #include <linux/slab.h> #include <linux/export.h> -static const char *skip_arg(const char *cp) -{ - while (*cp && !isspace(*cp)) - cp++; - - return cp; -} - static int count_argc(const char *str) { int count = 0; + bool was_space; - while (*str) { - str = skip_spaces(str); - if (*str) { + for (was_space = true; *str; str++) { + if (isspace(*str)) { + was_space = true; + } else if (was_space) { + was_space = false; count++; - str = skip_arg(str); } } @@ -39,10 +33,8 @@ static int count_argc(const char *str) */ void argv_free(char **argv) { - char **p; - for (p = argv; *p; p++) - kfree(*p); - + argv--; + kfree(argv[0]); kfree(argv); } EXPORT_SYMBOL(argv_free); @@ -59,43 +51,44 @@ EXPORT_SYMBOL(argv_free); * considered to be a single argument separator. The returned array * is always NULL-terminated. Returns NULL on memory allocation * failure. + * + * The source string at `str' may be undergoing concurrent alteration via + * userspace sysctl activity (at least). The argv_split() implementation + * attempts to handle this gracefully by taking a local copy to work on. 
*/ char **argv_split(gfp_t gfp, const char *str, int *argcp) { - int argc = count_argc(str); - char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp); - char **argvp; - - if (argv == NULL) - goto out; - - if (argcp) - *argcp = argc; - - argvp = argv; - - while (*str) { - str = skip_spaces(str); - - if (*str) { - const char *p = str; - char *t; - - str = skip_arg(str); + char *argv_str; + bool was_space; + char **argv, **argv_ret; + int argc; + + argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp); + if (!argv_str) + return NULL; + + argc = count_argc(argv_str); + argv = kmalloc(sizeof(*argv) * (argc + 2), gfp); + if (!argv) { + kfree(argv_str); + return NULL; + } - t = kstrndup(p, str-p, gfp); - if (t == NULL) - goto fail; - *argvp++ = t; + *argv = argv_str; + argv_ret = ++argv; + for (was_space = true; *argv_str; argv_str++) { + if (isspace(*argv_str)) { + was_space = true; + *argv_str = 0; + } else if (was_space) { + was_space = false; + *argv++ = argv_str; } } - *argvp = NULL; - - out: - return argv; + *argv = NULL; - fail: - argv_free(argv); - return NULL; + if (argcp) + *argcp = argc; + return argv_ret; } EXPORT_SYMBOL(argv_split); diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c index 9681d54b95d1..f8e0e5367398 100644 --- a/lib/bust_spinlocks.c +++ b/lib/bust_spinlocks.c @@ -8,6 +8,7 @@ */ #include <linux/kernel.h> +#include <linux/printk.h> #include <linux/spinlock.h> #include <linux/tty.h> #include <linux/wait.h> @@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes) wake_up_klogd(); } } - - diff --git a/lib/checksum.c b/lib/checksum.c index 12dceb27ff20..129775eb6de6 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -102,6 +102,7 @@ out: } #endif +#ifndef ip_fast_csum /* * This is a version of ip_compute_csum() optimized for IP headers, * which always checksum on 4 octet boundaries. 
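The rewritten argv_split() above makes one kstrndup() copy of the whole string, stores that copy in the slot just before the returned vector, and points the argv entries into it; that is why argv_free() shrank to two kfree() calls. A hedged usage sketch (demo_parse() and the log message are illustrative, not from the patch):

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int demo_parse(const char *cmdline)
	{
		char **argv;
		int argc, i;

		argv = argv_split(GFP_KERNEL, cmdline, &argc);
		if (!argv)
			return -ENOMEM;
		for (i = 0; i < argc; i++)
			pr_info("argv[%d] = '%s'\n", i, argv[i]);
		argv_free(argv);	/* frees the vector and its single backing copy */
		return 0;
	}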
@@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) return (__force __sum16)~do_csum(iph, ihl*4); } EXPORT_SYMBOL(ip_fast_csum); +#endif /* * computes the checksum of a memory block at buff, length len, diff --git a/lib/debugobjects.c b/lib/debugobjects.c index d11808ca4bc4..37061ede8b81 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -109,11 +109,10 @@ static void fill_pool(void) */ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) { - struct hlist_node *node; struct debug_obj *obj; int cnt = 0; - hlist_for_each_entry(obj, node, &b->list, node) { + hlist_for_each_entry(obj, &b->list, node) { cnt++; if (obj->object == addr) return obj; @@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj) static void debug_objects_oom(void) { struct debug_bucket *db = obj_hash; - struct hlist_node *node, *tmp; + struct hlist_node *tmp; HLIST_HEAD(freelist); struct debug_obj *obj; unsigned long flags; @@ -227,7 +226,7 @@ static void debug_objects_oom(void) raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ - hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { + hlist_for_each_entry_safe(obj, tmp, &freelist, node) { hlist_del(&obj->node); free_object(obj); } @@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr, static void __debug_check_no_obj_freed(const void *address, unsigned long size) { unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; - struct hlist_node *node, *tmp; + struct hlist_node *tmp; HLIST_HEAD(freelist); struct debug_obj_descr *descr; enum debug_obj_state state; @@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) repeat: cnt = 0; raw_spin_lock_irqsave(&db->lock, flags); - hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + hlist_for_each_entry_safe(obj, tmp, &db->list, node) { cnt++; oaddr = (unsigned long) obj->object; if (oaddr < saddr || oaddr >= eaddr) @@ -702,7 +701,7 @@ repeat: raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ - hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { + hlist_for_each_entry_safe(obj, tmp, &freelist, node) { hlist_del(&obj->node); free_object(obj); } @@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void) static int __init debug_objects_replace_static_objects(void) { struct debug_bucket *db = obj_hash; - struct hlist_node *node, *tmp; + struct hlist_node *tmp; struct debug_obj *obj, *new; HLIST_HEAD(objects); int i, cnt = 0; @@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void) local_irq_disable(); /* Remove the statically allocated objects from the pool */ - hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node) + hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) hlist_del(&obj->node); /* Move the allocated objects to the pool */ hlist_move_list(&objects, &obj_pool); @@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void) for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { hlist_move_list(&db->list, &objects); - hlist_for_each_entry(obj, node, &objects, node) { + hlist_for_each_entry(obj, &objects, node) { new = hlist_entry(obj_pool.first, typeof(*obj), node); hlist_del(&new->node); /* copy object data */ @@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void) obj_pool_used); return 0; free: - hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { + hlist_for_each_entry_safe(obj, tmp, &objects, node) { hlist_del(&obj->node); 
kmem_cache_free(obj_cache, obj); } diff --git a/lib/decompress.c b/lib/decompress.c index 31a804277282..f8fdedaf7b3d 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -38,7 +38,7 @@ struct compress_format { decompress_fn decompressor; }; -static const struct compress_format compressed_formats[] __initdata = { +static const struct compress_format compressed_formats[] __initconst = { { {037, 0213}, "gzip", gunzip }, { {037, 0236}, "gzip", gunzip }, { {0x42, 0x5a}, "bzip2", bunzip2 }, diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c index 4531294fa62f..960183d4258f 100644 --- a/lib/decompress_unlzo.c +++ b/lib/decompress_unlzo.c @@ -31,7 +31,7 @@ */ #ifdef STATIC -#include "lzo/lzo1x_decompress.c" +#include "lzo/lzo1x_decompress_safe.c" #else #include <linux/decompress/unlzo.h> #endif diff --git a/lib/devres.c b/lib/devres.c index 88ad75952a76..823533138fa0 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -227,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr) devm_ioport_map_match, (void *)addr)); } EXPORT_SYMBOL(devm_ioport_unmap); +#endif /* CONFIG_HAS_IOPORT */ #ifdef CONFIG_PCI /* @@ -432,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) } EXPORT_SYMBOL(pcim_iounmap_regions); #endif /* CONFIG_PCI */ -#endif /* CONFIG_HAS_IOPORT */ diff --git a/lib/div64.c b/lib/div64.c index a163b6caef73..3af5728d95fd 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -79,9 +79,10 @@ EXPORT_SYMBOL(div_s64_rem); #endif /** - * div64_u64 - unsigned 64bit divide with 64bit divisor + * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder * @dividend: 64bit dividend * @divisor: 64bit divisor + * @remainder: 64bit remainder * * This implementation is a modified version of the algorithm proposed * by the book 'Hacker's Delight'. 
The original source and full proof @@ -89,27 +90,33 @@ EXPORT_SYMBOL(div_s64_rem); * * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt' */ -#ifndef div64_u64 -u64 div64_u64(u64 dividend, u64 divisor) +#ifndef div64_u64_rem +u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) { u32 high = divisor >> 32; u64 quot; if (high == 0) { - quot = div_u64(dividend, divisor); + u32 rem32; + quot = div_u64_rem(dividend, divisor, &rem32); + *remainder = rem32; } else { int n = 1 + fls(high); quot = div_u64(dividend >> n, divisor >> n); if (quot != 0) quot--; - if ((dividend - quot * divisor) >= divisor) + + *remainder = dividend - quot * divisor; + if (*remainder >= divisor) { quot++; + *remainder -= divisor; + } } return quot; } -EXPORT_SYMBOL(div64_u64); +EXPORT_SYMBOL(div64_u64_rem); #endif /** diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 5e396accd3d0..d87a17a819d0 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref) entry = bucket_find_exact(bucket, ref); if (!entry) { + /* must drop lock before calling dma_mapping_error */ + put_hash_bucket(bucket, &flags); + if (dma_mapping_error(ref->dev, ref->dev_addr)) { err_printk(ref->dev, NULL, - "DMA-API: device driver tries " - "to free an invalid DMA memory address\n"); - return; + "DMA-API: device driver tries to free an " + "invalid DMA memory address\n"); + } else { + err_printk(ref->dev, NULL, + "DMA-API: device driver tries to free DMA " + "memory it has not allocated [device " + "address=0x%016llx] [size=%llu bytes]\n", + ref->dev_addr, ref->size); } - err_printk(ref->dev, NULL, "DMA-API: device driver tries " - "to free DMA memory it has not allocated " - "[device address=0x%016llx] [size=%llu bytes]\n", - ref->dev_addr, ref->size); - goto out; + return; } if (ref->size != entry->size) { @@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref) hash_bucket_del(entry); dma_entry_free(entry); -out: put_hash_bucket(bucket, &flags); } @@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) ref.dev = dev; ref.dev_addr = dma_addr; bucket = get_hash_bucket(&ref, &flags); - entry = bucket_find_exact(bucket, &ref); - if (!entry) - goto out; + list_for_each_entry(entry, &bucket->list, list) { + if (!exact_match(&ref, entry)) + continue; + + /* + * The same physical address can be mapped multiple + * times. Without a hardware IOMMU this results in the + * same device addresses being put into the dma-debug + * hash multiple times too. This can result in false + * positives being reported. Therefore we implement a + * best-fit algorithm here which updates the first entry + * from the hash which fits the reference value and is + * not currently listed as being checked. + */ + if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { + entry->map_err_type = MAP_ERR_CHECKED; + break; + } + } - entry->map_err_type = MAP_ERR_CHECKED; -out: put_hash_bucket(bucket, &flags); } EXPORT_SYMBOL(debug_dma_mapping_error); diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 42f4f55c9458..53bad099ebd6 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -5,11 +5,16 @@ #include <linux/kernel.h> #include <linux/export.h> +#include <linux/sched.h> +/** + * dump_stack - dump the current task information and its stack trace + * + * Architectures can override this implementation by implementing its own. 
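The point of the div64_u64_rem() variant above is that callers get quotient and remainder from a single call instead of a divide followed by a multiply-and-subtract. A sketch of the intended call pattern (the variable names and values are illustrative):

	u64 elapsed_ns = 123456789ULL;
	u64 period_ns = 20000000ULL;
	u64 rem;
	u64 periods = div64_u64_rem(elapsed_ns, period_ns, &rem);
	/* periods == elapsed_ns / period_ns; rem == elapsed_ns % period_ns */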
+ */ void dump_stack(void) { - printk(KERN_NOTICE - "This architecture does not implement dump_stack()\n"); + dump_stack_print_info(KERN_DEFAULT); + show_stack(NULL, NULL); } - EXPORT_SYMBOL(dump_stack); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 5276b99ca650..99fec3ae405a 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -24,6 +24,7 @@ #include <linux/sysctl.h> #include <linux/ctype.h> #include <linux/string.h> +#include <linux/string_helpers.h> #include <linux/uaccess.h> #include <linux/dynamic_debug.h> #include <linux/debugfs.h> @@ -276,48 +277,6 @@ static inline int parse_lineno(const char *str, unsigned int *val) return 0; } -/* - * Undo octal escaping in a string, inplace. This is useful to - * allow the user to express a query which matches a format - * containing embedded spaces. - */ -#define isodigit(c) ((c) >= '0' && (c) <= '7') -static char *unescape(char *str) -{ - char *in = str; - char *out = str; - - while (*in) { - if (*in == '\\') { - if (in[1] == '\\') { - *out++ = '\\'; - in += 2; - continue; - } else if (in[1] == 't') { - *out++ = '\t'; - in += 2; - continue; - } else if (in[1] == 'n') { - *out++ = '\n'; - in += 2; - continue; - } else if (isodigit(in[1]) && - isodigit(in[2]) && - isodigit(in[3])) { - *out++ = (((in[1] - '0') << 6) | - ((in[2] - '0') << 3) | - (in[3] - '0')); - in += 4; - continue; - } - } - *out++ = *in++; - } - *out = '\0'; - - return str; -} - static int check_set(const char **dest, char *src, char *name) { int rc = 0; @@ -371,8 +330,10 @@ static int ddebug_parse_query(char *words[], int nwords, } else if (!strcmp(words[i], "module")) { rc = check_set(&query->module, words[i+1], "module"); } else if (!strcmp(words[i], "format")) { - rc = check_set(&query->format, unescape(words[i+1]), - "format"); + string_unescape_inplace(words[i+1], UNESCAPE_SPACE | + UNESCAPE_OCTAL | + UNESCAPE_SPECIAL); + rc = check_set(&query->format, words[i+1], "format"); } else if (!strcmp(words[i], "line")) { char *first = words[i+1]; char *last = strchr(first, '-'); diff --git a/lib/fault-inject.c b/lib/fault-inject.c index f7210ad6cffd..c5c7a762b850 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -122,7 +122,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size) return false; } - if (attr->probability <= random32() % 100) + if (attr->probability <= prandom_u32() % 100) return false; if (!fail_stacktrace(attr)) diff --git a/lib/genalloc.c b/lib/genalloc.c index 54920433705a..b35cfa9bc3d4 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -34,6 +34,8 @@ #include <linux/rculist.h> #include <linux/interrupt.h> #include <linux/genalloc.h> +#include <linux/of_address.h> +#include <linux/of_device.h> static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set) { @@ -480,3 +482,82 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, return start_bit; } EXPORT_SYMBOL(gen_pool_best_fit); + +static void devm_gen_pool_release(struct device *dev, void *res) +{ + gen_pool_destroy(*(struct gen_pool **)res); +} + +/** + * devm_gen_pool_create - managed gen_pool_create + * @dev: device that provides the gen_pool + * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents + * @nid: node id of the node the pool structure should be allocated on, or -1 + * + * Create a new special memory pool that can be used to manage special purpose + * memory not managed by the regular kmalloc/kfree interface. The pool will be + * automatically destroyed by the device management code. 
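Because devm_gen_pool_create() registers a devres release handler, a driver that creates a pool in probe() no longer needs a matching gen_pool_destroy() in its error and remove paths. A sketch under assumptions (the driver name, SRAM address and sizes are invented):

	static int foo_probe(struct platform_device *pdev)
	{
		phys_addr_t sram_phys = 0x10000000;	/* made-up backing memory */
		void __iomem *sram = devm_ioremap(&pdev->dev, sram_phys, SZ_64K);
		struct gen_pool *pool;

		if (!sram)
			return -ENOMEM;
		/* one bitmap bit per 256-byte allocation unit, no NUMA preference */
		pool = devm_gen_pool_create(&pdev->dev, ilog2(256), -1);
		if (!pool)
			return -ENOMEM;
		return gen_pool_add_virt(pool, (unsigned long)sram, sram_phys,
					 SZ_64K, -1);
	}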
+ */ +struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, + int nid) +{ + struct gen_pool **ptr, *pool; + + ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL); + + pool = gen_pool_create(min_alloc_order, nid); + if (pool) { + *ptr = pool; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return pool; +} + +/** + * dev_get_gen_pool - Obtain the gen_pool (if any) for a device + * @dev: device to retrieve the gen_pool from + * @name: Optional name for the gen_pool, usually NULL + * + * Returns the gen_pool for the device if one is present, or NULL. + */ +struct gen_pool *dev_get_gen_pool(struct device *dev) +{ + struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, + NULL); + + if (!p) + return NULL; + return *p; +} +EXPORT_SYMBOL_GPL(dev_get_gen_pool); + +#ifdef CONFIG_OF +/** + * of_get_named_gen_pool - find a pool by phandle property + * @np: device node + * @propname: property name containing phandle(s) + * @index: index into the phandle array + * + * Returns the pool that contains the chunk starting at the physical + * address of the device tree node pointed at by the phandle property, + * or NULL if not found. + */ +struct gen_pool *of_get_named_gen_pool(struct device_node *np, + const char *propname, int index) +{ + struct platform_device *pdev; + struct device_node *np_pool; + + np_pool = of_parse_phandle(np, propname, index); + if (!np_pool) + return NULL; + pdev = of_find_device_by_node(np_pool); + if (!pdev) + return NULL; + return dev_get_gen_pool(&pdev->dev); +} +EXPORT_SYMBOL_GPL(of_get_named_gen_pool); +#endif /* CONFIG_OF */ diff --git a/lib/idr.c b/lib/idr.c index 648239079dd2..cca4b9302a71 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -35,10 +35,41 @@ #include <linux/string.h> #include <linux/idr.h> #include <linux/spinlock.h> +#include <linux/percpu.h> +#include <linux/hardirq.h> + +#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) +#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) + +/* Leave the possibility of an incomplete final layer */ +#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) + +/* Number of id_layer structs to leave in free list */ +#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) static struct kmem_cache *idr_layer_cache; +static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); +static DEFINE_PER_CPU(int, idr_preload_cnt); static DEFINE_SPINLOCK(simple_ida_lock); +/* the maximum ID which can be allocated given idr->layers */ +static int idr_max(int layers) +{ + int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); + + return (1 << bits) - 1; +} + +/* + * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is + * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and + * so on. + */ +static int idr_layer_prefix_mask(int layer) +{ + return ~idr_max(layer + 1); +} + static struct idr_layer *get_from_free_list(struct idr *idp) { struct idr_layer *p; @@ -54,6 +85,62 @@ static struct idr_layer *get_from_free_list(struct idr *idp) return(p); } +/** + * idr_layer_alloc - allocate a new idr_layer + * @gfp_mask: allocation mask + * @layer_idr: optional idr to allocate from + * + * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch + * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch + * an idr_layer from @idr->id_free. + * + * @layer_idr is to maintain backward compatibility with the old alloc + * interface - idr_pre_get() and idr_get_new*() - and will be removed + * together with per-pool preload buffer. 
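Returning briefly to of_get_named_gen_pool() above: the consumer side is a plain phandle lookup. A sketch assuming an invented "sram" property; dev is taken to be the consumer's struct device:

	/* DT fragment (illustrative):
	 *	foo@0 {
	 *		sram = <&mysram>;
	 *	};
	 */
	struct gen_pool *pool;

	pool = of_get_named_gen_pool(dev->of_node, "sram", 0);
	if (!pool)
		return -ENODEV;	/* provider missing or probed without a pool */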
+ */ +static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) +{ + struct idr_layer *new; + + /* this is the old path, bypass to get_from_free_list() */ + if (layer_idr) + return get_from_free_list(layer_idr); + + /* + * Try to allocate directly from kmem_cache. We want to try this + * before preload buffer; otherwise, non-preloading idr_alloc() + * users will end up taking advantage of preloading ones. As the + * following is allowed to fail for preloaded cases, suppress + * warning this time. + */ + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); + if (new) + return new; + + /* + * Try to fetch one from the per-cpu preload buffer if in process + * context. See idr_preload() for details. + */ + if (!in_interrupt()) { + preempt_disable(); + new = __this_cpu_read(idr_preload_head); + if (new) { + __this_cpu_write(idr_preload_head, new->ary[0]); + __this_cpu_dec(idr_preload_cnt); + new->ary[0] = NULL; + } + preempt_enable(); + if (new) + return new; + } + + /* + * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so + * that memory allocation failure warning is printed as intended. + */ + return kmem_cache_zalloc(idr_layer_cache, gfp_mask); +} + static void idr_layer_rcu_free(struct rcu_head *head) { struct idr_layer *layer; @@ -62,8 +149,10 @@ static void idr_layer_rcu_free(struct rcu_head *head) kmem_cache_free(idr_layer_cache, layer); } -static inline void free_layer(struct idr_layer *p) +static inline void free_layer(struct idr *idr, struct idr_layer *p) { + if (idr->hint && idr->hint == p) + RCU_INIT_POINTER(idr->hint, NULL); call_rcu(&p->rcu_head, idr_layer_rcu_free); } @@ -92,35 +181,22 @@ static void idr_mark_full(struct idr_layer **pa, int id) struct idr_layer *p = pa[0]; int l = 0; - __set_bit(id & IDR_MASK, &p->bitmap); + __set_bit(id & IDR_MASK, p->bitmap); /* * If this layer is full mark the bit in the layer above to * show that this part of the radix tree is full. This may * complete the layer above and require walking up the radix * tree. */ - while (p->bitmap == IDR_FULL) { + while (bitmap_full(p->bitmap, IDR_SIZE)) { if (!(p = pa[++l])) break; id = id >> IDR_BITS; - __set_bit((id & IDR_MASK), &p->bitmap); + __set_bit((id & IDR_MASK), p->bitmap); } } -/** - * idr_pre_get - reserve resources for idr allocation - * @idp: idr handle - * @gfp_mask: memory allocation flags - * - * This function should be called prior to calling the idr_get_new* functions. - * It preallocates enough memory to satisfy the worst possible allocation. The - * caller should pass in GFP_KERNEL if possible. This of course requires that - * no spinning locks be held. - * - * If the system is REALLY out of memory this function returns %0, - * otherwise %1. - */ -int idr_pre_get(struct idr *idp, gfp_t gfp_mask) +int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) { while (idp->id_free_cnt < MAX_IDR_FREE) { struct idr_layer *new; @@ -131,14 +207,30 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) } return 1; } -EXPORT_SYMBOL(idr_pre_get); +EXPORT_SYMBOL(__idr_pre_get); -static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) +/** + * sub_alloc - try to allocate an id without growing the tree depth + * @idp: idr handle + * @starting_id: id to start search at + * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer + * @gfp_mask: allocation mask for idr_layer_alloc() + * @layer_idr: optional idr passed to idr_layer_alloc() + * + * Allocate an id in range [@starting_id, INT_MAX] from @idp without + * growing its depth. 
Returns + * + * the allocated id >= 0 if successful, + * -EAGAIN if the tree needs to grow for allocation to succeed, + * -ENOSPC if the id space is exhausted, + * -ENOMEM if more idr_layers need to be allocated. + */ +static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, + gfp_t gfp_mask, struct idr *layer_idr) { int n, m, sh; struct idr_layer *p, *new; int l, id, oid; - unsigned long bm; id = *starting_id; restart: @@ -150,8 +242,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) * We run around this while until we reach the leaf node... */ n = (id >> (IDR_BITS*l)) & IDR_MASK; - bm = ~p->bitmap; - m = find_next_bit(&bm, IDR_SIZE, n); + m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); if (m == IDR_SIZE) { /* no space available go back to previous layer. */ l++; @@ -161,7 +252,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) /* if already at the top layer, we need to grow */ if (id >= 1 << (idp->layers * IDR_BITS)) { *starting_id = id; - return IDR_NEED_TO_GROW; + return -EAGAIN; } p = pa[l]; BUG_ON(!p); @@ -180,17 +271,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) id = ((id >> sh) ^ n ^ m) << sh; } if ((id >= MAX_IDR_BIT) || (id < 0)) - return IDR_NOMORE_SPACE; + return -ENOSPC; if (l == 0) break; /* * Create the layer below if it is missing. */ if (!p->ary[m]) { - new = get_from_free_list(idp); + new = idr_layer_alloc(gfp_mask, layer_idr); if (!new) - return -1; + return -ENOMEM; new->layer = l-1; + new->prefix = id & idr_layer_prefix_mask(new->layer); rcu_assign_pointer(p->ary[m], new); p->count++; } @@ -203,7 +295,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) } static int idr_get_empty_slot(struct idr *idp, int starting_id, - struct idr_layer **pa) + struct idr_layer **pa, gfp_t gfp_mask, + struct idr *layer_idr) { struct idr_layer *p, *new; int layers, v, id; @@ -214,8 +307,8 @@ build_up: p = idp->top; layers = idp->layers; if (unlikely(!p)) { - if (!(p = get_from_free_list(idp))) - return -1; + if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) + return -ENOMEM; p->layer = 0; layers = 1; } @@ -223,7 +316,7 @@ build_up: * Add a new layer to the top of the tree if the requested * id is larger than the currently allocated space. */ - while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { + while (id > idr_max(layers)) { layers++; if (!p->count) { /* special case: if the tree is currently empty, @@ -231,9 +324,10 @@ build_up: * upwards. */ p->layer++; + WARN_ON_ONCE(p->prefix); continue; } - if (!(new = get_from_free_list(idp))) { + if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { /* * The allocation failed. If we built part of * the structure tear it down. 
@@ -242,110 +336,191 @@ build_up: for (new = p; p && p != idp->top; new = p) { p = p->ary[0]; new->ary[0] = NULL; - new->bitmap = new->count = 0; + new->count = 0; + bitmap_clear(new->bitmap, 0, IDR_SIZE); __move_to_free_list(idp, new); } spin_unlock_irqrestore(&idp->lock, flags); - return -1; + return -ENOMEM; } new->ary[0] = p; new->count = 1; new->layer = layers-1; - if (p->bitmap == IDR_FULL) - __set_bit(0, &new->bitmap); + new->prefix = id & idr_layer_prefix_mask(new->layer); + if (bitmap_full(p->bitmap, IDR_SIZE)) + __set_bit(0, new->bitmap); p = new; } rcu_assign_pointer(idp->top, p); idp->layers = layers; - v = sub_alloc(idp, &id, pa); - if (v == IDR_NEED_TO_GROW) + v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); + if (v == -EAGAIN) goto build_up; return(v); } -static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) +/* + * @id and @pa are from a successful allocation from idr_get_empty_slot(). + * Install the user pointer @ptr and mark the slot full. + */ +static void idr_fill_slot(struct idr *idr, void *ptr, int id, + struct idr_layer **pa) { - struct idr_layer *pa[MAX_IDR_LEVEL]; - int id; + /* update hint used for lookup, cleared from free_layer() */ + rcu_assign_pointer(idr->hint, pa[0]); - id = idr_get_empty_slot(idp, starting_id, pa); - if (id >= 0) { - /* - * Successfully found an empty slot. Install the user - * pointer and mark the slot full. - */ - rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], - (struct idr_layer *)ptr); - pa[0]->count++; - idr_mark_full(pa, id); - } + rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); + pa[0]->count++; + idr_mark_full(pa, id); +} - return id; +int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) +{ + struct idr_layer *pa[MAX_IDR_LEVEL + 1]; + int rv; + + rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); + if (rv < 0) + return rv == -ENOMEM ? -EAGAIN : rv; + + idr_fill_slot(idp, ptr, rv, pa); + *id = rv; + return 0; } +EXPORT_SYMBOL(__idr_get_new_above); /** - * idr_get_new_above - allocate new idr entry above or equal to a start id - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @starting_id: id to start search at - * @id: pointer to the allocated handle + * idr_preload - preload for idr_alloc() + * @gfp_mask: allocation mask to use for preloading * - * This is the allocate id function. It should be called with any - * required locks. + * Preload per-cpu layer buffer for idr_alloc(). Can only be used from + * process context and each idr_preload() invocation should be matched with + * idr_preload_end(). Note that preemption is disabled while preloaded. * - * If allocation from IDR's private freelist fails, idr_get_new_above() will - * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill - * IDR's preallocation and then retry the idr_get_new_above() call. + * The first idr_alloc() in the preloaded section can be treated as if it + * were invoked with @gfp_mask used for preloading. This allows using more + * permissive allocation masks for idrs protected by spinlocks. * - * If the idr is full idr_get_new_above() will return %-ENOSPC. + * For example, if idr_alloc() below fails, the failure can be treated as + * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. * - * @id returns a value in the range @starting_id ... 
%0x7fffffff + * idr_preload(GFP_KERNEL); + * spin_lock(lock); + * + * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); + * + * spin_unlock(lock); + * idr_preload_end(); + * if (id < 0) + * error; */ -int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) +void idr_preload(gfp_t gfp_mask) { - int rv; + /* + * Consuming preload buffer from non-process context breaks preload + * allocation guarantee. Disallow usage from those contexts. + */ + WARN_ON_ONCE(in_interrupt()); + might_sleep_if(gfp_mask & __GFP_WAIT); + + preempt_disable(); - rv = idr_get_new_above_int(idp, ptr, starting_id); /* - * This is a cheap hack until the IDR code can be fixed to - * return proper error values. + * idr_alloc() is likely to succeed w/o full idr_layer buffer and + * return value from idr_alloc() needs to be checked for failure + * anyway. Silently give up if allocation fails. The caller can + * treat failures from idr_alloc() as if idr_alloc() were called + * with @gfp_mask which should be enough. */ - if (rv < 0) - return _idr_rc_to_errno(rv); - *id = rv; - return 0; + while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { + struct idr_layer *new; + + preempt_enable(); + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); + preempt_disable(); + if (!new) + break; + + /* link the new one to per-cpu preload list */ + new->ary[0] = __this_cpu_read(idr_preload_head); + __this_cpu_write(idr_preload_head, new); + __this_cpu_inc(idr_preload_cnt); + } } -EXPORT_SYMBOL(idr_get_new_above); +EXPORT_SYMBOL(idr_preload); /** - * idr_get_new - allocate new idr entry - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @id: pointer to the allocated handle + * idr_alloc - allocate new idr entry + * @idr: the (initialized) idr + * @ptr: pointer to be associated with the new id + * @start: the minimum id (inclusive) + * @end: the maximum id (exclusive, <= 0 for max) + * @gfp_mask: memory allocation flags * - * If allocation from IDR's private freelist fails, idr_get_new_above() will - * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill - * IDR's preallocation and then retry the idr_get_new_above() call. + * Allocate an id in [start, end) and associate it with @ptr. If no ID is + * available in the specified range, returns -ENOSPC. On memory allocation + * failure, returns -ENOMEM. * - * If the idr is full idr_get_new_above() will return %-ENOSPC. + * Note that @end is treated as max when <= 0. This is to always allow + * using @start + N as @end as long as N is inside integer range. * - * @id returns a value in the range %0 ... %0x7fffffff + * The user is responsible for exclusively synchronizing all operations + * which may modify @idr. However, read-only accesses such as idr_find() + * or iteration can be performed under RCU read lock provided the user + * destroys @ptr in RCU-safe way after removal from idr. */ -int idr_get_new(struct idr *idp, void *ptr, int *id) +int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) { - int rv; + int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ + struct idr_layer *pa[MAX_IDR_LEVEL + 1]; + int id; - rv = idr_get_new_above_int(idp, ptr, 0); - /* - * This is a cheap hack until the IDR code can be fixed to - * return proper error values. 
- */ - if (rv < 0) - return _idr_rc_to_errno(rv); - *id = rv; - return 0; + might_sleep_if(gfp_mask & __GFP_WAIT); + + /* sanity checks */ + if (WARN_ON_ONCE(start < 0)) + return -EINVAL; + if (unlikely(max < start)) + return -ENOSPC; + + /* allocate id */ + id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); + if (unlikely(id < 0)) + return id; + if (unlikely(id > max)) + return -ENOSPC; + + idr_fill_slot(idr, ptr, id, pa); + return id; +} +EXPORT_SYMBOL_GPL(idr_alloc); + +/** + * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion + * @idr: the (initialized) idr + * @ptr: pointer to be associated with the new id + * @start: the minimum id (inclusive) + * @end: the maximum id (exclusive, <= 0 for max) + * @gfp_mask: memory allocation flags + * + * Essentially the same as idr_alloc, but prefers to allocate progressively + * higher ids if it can. If the "cur" counter wraps, then it will start again + * at the "start" end of the range and allocate one that has already been used. + */ +int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, + gfp_t gfp_mask) +{ + int id; + + id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask); + if (id == -ENOSPC) + id = idr_alloc(idr, ptr, start, end, gfp_mask); + + if (likely(id >= 0)) + idr->cur = id + 1; + return id; } -EXPORT_SYMBOL(idr_get_new); +EXPORT_SYMBOL(idr_alloc_cyclic); static void idr_remove_warning(int id) { @@ -357,7 +532,7 @@ static void idr_remove_warning(int id) static void sub_remove(struct idr *idp, int shift, int id) { struct idr_layer *p = idp->top; - struct idr_layer **pa[MAX_IDR_LEVEL]; + struct idr_layer **pa[MAX_IDR_LEVEL + 1]; struct idr_layer ***paa = &pa[0]; struct idr_layer *to_free; int n; @@ -367,26 +542,26 @@ static void sub_remove(struct idr *idp, int shift, int id) while ((shift > 0) && p) { n = (id >> shift) & IDR_MASK; - __clear_bit(n, &p->bitmap); + __clear_bit(n, p->bitmap); *++paa = &p->ary[n]; p = p->ary[n]; shift -= IDR_BITS; } n = id & IDR_MASK; - if (likely(p != NULL && test_bit(n, &p->bitmap))){ - __clear_bit(n, &p->bitmap); + if (likely(p != NULL && test_bit(n, p->bitmap))) { + __clear_bit(n, p->bitmap); rcu_assign_pointer(p->ary[n], NULL); to_free = NULL; while(*paa && ! --((**paa)->count)){ if (to_free) - free_layer(to_free); + free_layer(idp, to_free); to_free = **paa; **paa-- = NULL; } if (!*paa) idp->layers = 0; if (to_free) - free_layer(to_free); + free_layer(idp, to_free); } else idr_remove_warning(id); } @@ -401,8 +576,8 @@ void idr_remove(struct idr *idp, int id) struct idr_layer *p; struct idr_layer *to_free; - /* Mask off upper bits we don't use for the search. */ - id &= MAX_IDR_MASK; + if (id < 0) + return; sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); if (idp->top && idp->top->count == 1 && (idp->layers > 1) && @@ -417,8 +592,9 @@ void idr_remove(struct idr *idp, int id) p = idp->top->ary[0]; rcu_assign_pointer(idp->top, p); --idp->layers; - to_free->bitmap = to_free->count = 0; - free_layer(to_free); + to_free->count = 0; + bitmap_clear(to_free->bitmap, 0, IDR_SIZE); + free_layer(idp, to_free); } while (idp->id_free_cnt >= MAX_IDR_FREE) { p = get_from_free_list(idp); @@ -433,34 +609,21 @@ void idr_remove(struct idr *idp, int id) } EXPORT_SYMBOL(idr_remove); -/** - * idr_remove_all - remove all ids from the given idr tree - * @idp: idr handle - * - * idr_destroy() only frees up unused, cached idp_layers, but this - * function will remove all id mappings and leave all idp_layers - * unused. 
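idr_alloc_cyclic() above simply biases idr_alloc() toward ids at or above idr->cur and only falls back to the full [start, end) range on -ENOSPC, so recently freed ids are not handed out again immediately. Combined with the preloading pattern from the idr_preload() kerneldoc, a caller under a spinlock looks roughly like this (my_idr, my_lock and ptr are assumptions):

	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc_cyclic(&my_idr, ptr, 1, 0, GFP_NOWAIT);	/* [1, INT_MAX] */
	spin_unlock(&my_lock);
	idr_preload_end();
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */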
- *
- * A typical clean-up sequence for objects stored in an idr tree will
- * use idr_for_each() to free all objects, if necessay, then
- * idr_remove_all() to remove all ids, and idr_destroy() to free
- * up the cached idr_layers.
- */
-void idr_remove_all(struct idr *idp)
+void __idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
 	int bt_mask;
 	struct idr_layer *p;
-	struct idr_layer *pa[MAX_IDR_LEVEL];
+	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
 	p = idp->top;
 	rcu_assign_pointer(idp->top, NULL);
-	max = 1 << n;
+	max = idr_max(idp->layers);
 
 	id = 0;
-	while (id < max) {
+	while (id >= 0 && id <= max) {
 		while (n > IDR_BITS && p) {
 			n -= IDR_BITS;
 			*paa++ = p;
@@ -472,21 +635,32 @@ void idr_remove_all(struct idr *idp)
 		/* Get the highest bit that the above add changed from 0->1. */
 		while (n < fls(id ^ bt_mask)) {
 			if (p)
-				free_layer(p);
+				free_layer(idp, p);
 			n += IDR_BITS;
 			p = *--paa;
 		}
 	}
 	idp->layers = 0;
 }
-EXPORT_SYMBOL(idr_remove_all);
+EXPORT_SYMBOL(__idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
  * @idp: idr handle
+ *
+ * Free all id mappings and all idp_layers. After this function, @idp is
+ * completely unused and can be freed / recycled. The caller is
+ * responsible for ensuring that no one else accesses @idp during or after
+ * idr_destroy().
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
+ * free up the id mappings and cached idr_layers.
  */
 void idr_destroy(struct idr *idp)
 {
+	__idr_remove_all(idp);
+
 	while (idp->id_free_cnt) {
 		struct idr_layer *p = get_from_free_list(idp);
 		kmem_cache_free(idr_layer_cache, p);
@@ -494,32 +668,20 @@ void idr_destroy(struct idr *idp)
 }
 EXPORT_SYMBOL(idr_destroy);
 
-/**
- * idr_find - return pointer for given id
- * @idp: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
- */
-void *idr_find(struct idr *idp, int id)
+void *idr_find_slowpath(struct idr *idp, int id)
 {
 	int n;
 	struct idr_layer *p;
 
+	if (id < 0)
+		return NULL;
+
 	p = rcu_dereference_raw(idp->top);
 	if (!p)
 		return NULL;
 	n = (p->layer+1) * IDR_BITS;
 
-	/* Mask off upper bits we don't use for the search.
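Concretely, the clean-up sequence described in the idr_destroy() kerneldoc above comes out as one callback plus two calls (free_one() and my_idr are illustrative names):

	static int free_one(int id, void *p, void *data)
	{
		kfree(p);	/* p is whatever was stored under this id */
		return 0;
	}

	/* ... at teardown time ... */
	idr_for_each(&my_idr, free_one, NULL);
	idr_destroy(&my_idr);	/* now also drops the id mappings */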
*/ - id &= MAX_IDR_MASK; - - if (id >= (1 << n)) + if (id > idr_max(p->layer + 1)) return NULL; BUG_ON(n == 0); @@ -530,7 +692,7 @@ void *idr_find(struct idr *idp, int id) } return((void *)p); } -EXPORT_SYMBOL(idr_find); +EXPORT_SYMBOL(idr_find_slowpath); /** * idr_for_each - iterate through all stored pointers @@ -555,15 +717,15 @@ int idr_for_each(struct idr *idp, { int n, id, max, error = 0; struct idr_layer *p; - struct idr_layer *pa[MAX_IDR_LEVEL]; + struct idr_layer *pa[MAX_IDR_LEVEL + 1]; struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; p = rcu_dereference_raw(idp->top); - max = 1 << n; + max = idr_max(idp->layers); id = 0; - while (id < max) { + while (id >= 0 && id <= max) { while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; @@ -601,7 +763,7 @@ EXPORT_SYMBOL(idr_for_each); */ void *idr_get_next(struct idr *idp, int *nextidp) { - struct idr_layer *p, *pa[MAX_IDR_LEVEL]; + struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; struct idr_layer **paa = &pa[0]; int id = *nextidp; int n, max; @@ -611,9 +773,9 @@ void *idr_get_next(struct idr *idp, int *nextidp) if (!p) return NULL; n = (p->layer + 1) * IDR_BITS; - max = 1 << n; + max = idr_max(p->layer + 1); - while (id < max) { + while (id >= 0 && id <= max) { while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; @@ -625,7 +787,14 @@ void *idr_get_next(struct idr *idp, int *nextidp) return p; } - id += 1 << n; + /* + * Proceed to the next layer at the current level. Unlike + * idr_for_each(), @id isn't guaranteed to be aligned to + * layer boundary at this point and adding 1 << n may + * incorrectly skip IDs. Make sure we jump to the + * beginning of the next layer using round_up(). + */ + id = round_up(id + 1, 1 << n); while (n < fls(id)) { n += IDR_BITS; p = *--paa; @@ -653,14 +822,15 @@ void *idr_replace(struct idr *idp, void *ptr, int id) int n; struct idr_layer *p, *old_p; + if (id < 0) + return ERR_PTR(-EINVAL); + p = idp->top; if (!p) return ERR_PTR(-EINVAL); n = (p->layer+1) * IDR_BITS; - id &= MAX_IDR_MASK; - if (id >= (1 << n)) return ERR_PTR(-EINVAL); @@ -671,7 +841,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) } n = id & IDR_MASK; - if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) + if (unlikely(p == NULL || !test_bit(n, p->bitmap))) return ERR_PTR(-ENOENT); old_p = p->ary[n]; @@ -745,7 +915,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) int ida_pre_get(struct ida *ida, gfp_t gfp_mask) { /* allocate idr_layers */ - if (!idr_pre_get(&ida->idr, gfp_mask)) + if (!__idr_pre_get(&ida->idr, gfp_mask)) return 0; /* allocate free_bitmap */ @@ -780,7 +950,7 @@ EXPORT_SYMBOL(ida_pre_get); */ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) { - struct idr_layer *pa[MAX_IDR_LEVEL]; + struct idr_layer *pa[MAX_IDR_LEVEL + 1]; struct ida_bitmap *bitmap; unsigned long flags; int idr_id = starting_id / IDA_BITMAP_BITS; @@ -789,9 +959,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) restart: /* get vacant slot */ - t = idr_get_empty_slot(&ida->idr, idr_id, pa); + t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); if (t < 0) - return _idr_rc_to_errno(t); + return t == -ENOMEM ? -EAGAIN : t; if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) return -ENOSPC; @@ -852,25 +1022,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) EXPORT_SYMBOL(ida_get_new_above); /** - * ida_get_new - allocate new ID - * @ida: idr handle - * @p_id: pointer to the allocated handle - * - * Allocate new ID. It should be called with any required locks. 
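Unlike idr, the ida side keeps the old two-step interface, so callers still need the documented pre-get/retry loop around ida_get_new_above() (my_ida and my_lock are assumptions):

	int id, ret;

	do {
		if (!ida_pre_get(&my_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&my_lock);
		ret = ida_get_new_above(&my_ida, 0, &id);
		spin_unlock(&my_lock);
	} while (ret == -EAGAIN);

	if (ret)
		return ret;	/* -ENOSPC: id space exhausted */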
- * - * If memory is required, it will return %-EAGAIN, you should unlock - * and go back to the idr_pre_get() call. If the idr is full, it will - * return %-ENOSPC. - * - * @p_id returns a value in the range %0 ... %0x7fffffff. - */ -int ida_get_new(struct ida *ida, int *p_id) -{ - return ida_get_new_above(ida, 0, p_id); -} -EXPORT_SYMBOL(ida_get_new); - -/** * ida_remove - remove the given ID * @ida: ida handle * @id: ID to free @@ -887,7 +1038,7 @@ void ida_remove(struct ida *ida, int id) /* clear full bits while looking up the leaf idr_layer */ while ((shift > 0) && p) { n = (idr_id >> shift) & IDR_MASK; - __clear_bit(n, &p->bitmap); + __clear_bit(n, p->bitmap); p = p->ary[n]; shift -= IDR_BITS; } @@ -896,7 +1047,7 @@ void ida_remove(struct ida *ida, int id) goto err; n = idr_id & IDR_MASK; - __clear_bit(n, &p->bitmap); + __clear_bit(n, p->bitmap); bitmap = (void *)p->ary[n]; if (!test_bit(offset, bitmap->bitmap)) @@ -905,7 +1056,7 @@ void ida_remove(struct ida *ida, int id) /* update bitmap and remove it if empty */ __clear_bit(offset, bitmap->bitmap); if (--bitmap->nr_busy == 0) { - __set_bit(n, &p->bitmap); /* to please idr_remove() */ + __set_bit(n, p->bitmap); /* to please idr_remove() */ idr_remove(&ida->idr, idr_id); free_bitmap(ida, bitmap); } diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index fc2eeb7cb2ea..1ef4cc344977 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c @@ -1,3 +1,9 @@ +/* + * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@hp.com> + * + * Based on the shift-and-subtract algorithm for computing integer + * square root from Guy L. Steele. + */ #include <linux/kernel.h> #include <linux/export.h> @@ -10,23 +16,23 @@ */ unsigned long int_sqrt(unsigned long x) { - unsigned long op, res, one; + unsigned long b, m, y = 0; - op = x; - res = 0; + if (x <= 1) + return x; - one = 1UL << (BITS_PER_LONG - 2); - while (one > op) - one >>= 2; + m = 1UL << (BITS_PER_LONG - 2); + while (m != 0) { + b = y + m; + y >>= 1; - while (one != 0) { - if (op >= res + one) { - op = op - (res + one); - res = res + 2 * one; + if (x >= b) { + x -= b; + y += m; } - res /= 2; - one /= 4; + m >>= 2; } - return res; + + return y; } EXPORT_SYMBOL(int_sqrt); diff --git a/lib/kfifo.c b/lib/kfifo.c new file mode 100644 index 000000000000..7b7f83027b7b --- /dev/null +++ b/lib/kfifo.c @@ -0,0 +1,607 @@ +/* + * A generic kernel FIFO implementation + * + * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
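Before the implementation below: the __kfifo_* functions in this new file are the out-of-line backend for the type-safe macros in <linux/kfifo.h>. A hedged sketch of the caller-facing side (my_fifo, demo() and the values are illustrative); per the kfifo documentation, a single producer and a single consumer may run concurrently without a lock:

	#include <linux/kfifo.h>

	static DEFINE_KFIFO(my_fifo, int, 16);	/* element count, power of two */

	static void demo(void)
	{
		int v = 42, out;

		kfifo_in(&my_fifo, &v, 1);	/* expands to __kfifo_in() */
		if (kfifo_out(&my_fifo, &out, 1))
			pr_info("popped %d\n", out);
	}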
+ * + */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/log2.h> +#include <linux/uaccess.h> +#include <linux/kfifo.h> + +/* + * internal helper to calculate the unused elements in a fifo + */ +static inline unsigned int kfifo_unused(struct __kfifo *fifo) +{ + return (fifo->mask + 1) - (fifo->in - fifo->out); +} + +int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, + size_t esize, gfp_t gfp_mask) +{ + /* + * round down to the next power of 2, since our 'let the indices + * wrap' technique works only in this case. + */ + size = roundup_pow_of_two(size); + + fifo->in = 0; + fifo->out = 0; + fifo->esize = esize; + + if (size < 2) { + fifo->data = NULL; + fifo->mask = 0; + return -EINVAL; + } + + fifo->data = kmalloc(size * esize, gfp_mask); + + if (!fifo->data) { + fifo->mask = 0; + return -ENOMEM; + } + fifo->mask = size - 1; + + return 0; +} +EXPORT_SYMBOL(__kfifo_alloc); + +void __kfifo_free(struct __kfifo *fifo) +{ + kfree(fifo->data); + fifo->in = 0; + fifo->out = 0; + fifo->esize = 0; + fifo->data = NULL; + fifo->mask = 0; +} +EXPORT_SYMBOL(__kfifo_free); + +int __kfifo_init(struct __kfifo *fifo, void *buffer, + unsigned int size, size_t esize) +{ + size /= esize; + + size = roundup_pow_of_two(size); + + fifo->in = 0; + fifo->out = 0; + fifo->esize = esize; + fifo->data = buffer; + + if (size < 2) { + fifo->mask = 0; + return -EINVAL; + } + fifo->mask = size - 1; + + return 0; +} +EXPORT_SYMBOL(__kfifo_init); + +static void kfifo_copy_in(struct __kfifo *fifo, const void *src, + unsigned int len, unsigned int off) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(fifo->data + off, src, l); + memcpy(fifo->data, src + l, len - l); + /* + * make sure that the data in the fifo is up to date before + * incrementing the fifo->in index counter + */ + smp_wmb(); +} + +unsigned int __kfifo_in(struct __kfifo *fifo, + const void *buf, unsigned int len) +{ + unsigned int l; + + l = kfifo_unused(fifo); + if (len > l) + len = l; + + kfifo_copy_in(fifo, buf, len, fifo->in); + fifo->in += len; + return len; +} +EXPORT_SYMBOL(__kfifo_in); + +static void kfifo_copy_out(struct __kfifo *fifo, void *dst, + unsigned int len, unsigned int off) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, fifo->data + off, l); + memcpy(dst + l, fifo->data, len - l); + /* + * make sure that the data is copied before + * incrementing the fifo->out index counter + */ + smp_wmb(); +} + +unsigned int __kfifo_out_peek(struct __kfifo *fifo, + void *buf, unsigned int len) +{ + unsigned int l; + + l = fifo->in - fifo->out; + if (len > l) + len = l; + + kfifo_copy_out(fifo, buf, len, fifo->out); + return len; +} +EXPORT_SYMBOL(__kfifo_out_peek); + +unsigned int __kfifo_out(struct __kfifo *fifo, + void *buf, unsigned int len) +{ + len = __kfifo_out_peek(fifo, buf, len); + fifo->out += len; + return len; +} +EXPORT_SYMBOL(__kfifo_out); + +static unsigned long kfifo_copy_from_user(struct __kfifo *fifo, + const void __user *from, unsigned int len, unsigned int off, + unsigned int *copied) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + unsigned long 
ret; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + ret = copy_from_user(fifo->data + off, from, l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret + len - l, esize); + else { + ret = copy_from_user(fifo->data, from + l, len - l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret, esize); + } + /* + * make sure that the data in the fifo is up to date before + * incrementing the fifo->in index counter + */ + smp_wmb(); + *copied = len - ret; + /* return the number of elements which are not copied */ + return ret; +} + +int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, + unsigned long len, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int esize = fifo->esize; + int err; + + if (esize != 1) + len /= esize; + + l = kfifo_unused(fifo); + if (len > l) + len = l; + + ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); + if (unlikely(ret)) { + len -= ret; + err = -EFAULT; + } else + err = 0; + fifo->in += len; + return err; +} +EXPORT_SYMBOL(__kfifo_from_user); + +static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to, + unsigned int len, unsigned int off, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + ret = copy_to_user(to, fifo->data + off, l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret + len - l, esize); + else { + ret = copy_to_user(to + l, fifo->data, len - l); + if (unlikely(ret)) + ret = DIV_ROUND_UP(ret, esize); + } + /* + * make sure that the data is copied before + * incrementing the fifo->out index counter + */ + smp_wmb(); + *copied = len - ret; + /* return the number of elements which are not copied */ + return ret; +} + +int __kfifo_to_user(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied) +{ + unsigned int l; + unsigned long ret; + unsigned int esize = fifo->esize; + int err; + + if (esize != 1) + len /= esize; + + l = fifo->in - fifo->out; + if (len > l) + len = l; + ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied); + if (unlikely(ret)) { + len -= ret; + err = -EFAULT; + } else + err = 0; + fifo->out += len; + return err; +} +EXPORT_SYMBOL(__kfifo_to_user); + +static int setup_sgl_buf(struct scatterlist *sgl, void *buf, + int nents, unsigned int len) +{ + int n; + unsigned int l; + unsigned int off; + struct page *page; + + if (!nents) + return 0; + + if (!len) + return 0; + + n = 0; + page = virt_to_page(buf); + off = offset_in_page(buf); + l = 0; + + while (len >= l + PAGE_SIZE - off) { + struct page *npage; + + l += PAGE_SIZE; + buf += PAGE_SIZE; + npage = virt_to_page(buf); + if (page_to_phys(page) != page_to_phys(npage) - l) { + sg_set_page(sgl, page, l - off, off); + sgl = sg_next(sgl); + if (++n == nents || sgl == NULL) + return n; + page = npage; + len -= l - off; + l = off = 0; + } + } + sg_set_page(sgl, page, len, off); + return n + 1; +} + +static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, + int nents, unsigned int len, unsigned int off) +{ + unsigned int size = fifo->mask + 1; + unsigned int esize = fifo->esize; + unsigned int l; + unsigned int n; + + off &= fifo->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + n = setup_sgl_buf(sgl, fifo->data + off, nents, l); + n 
+= setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+
+	return n;
+}
+
+unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+		struct scatterlist *sgl, int nents, unsigned int len)
+{
+	unsigned int l;
+
+	l = kfifo_unused(fifo);
+	if (len > l)
+		len = l;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->in);
+}
+EXPORT_SYMBOL(__kfifo_dma_in_prepare);
+
+unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+		struct scatterlist *sgl, int nents, unsigned int len)
+{
+	unsigned int l;
+
+	l = fifo->in - fifo->out;
+	if (len > l)
+		len = l;
+
+	return setup_sgl(fifo, sgl, nents, len, fifo->out);
+}
+EXPORT_SYMBOL(__kfifo_dma_out_prepare);
+
+unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
+{
+	unsigned int max = (1 << (recsize << 3)) - 1;
+
+	if (len > max)
+		return max;
+	return len;
+}
+EXPORT_SYMBOL(__kfifo_max_r);
+
+#define __KFIFO_PEEK(data, out, mask) \
+	((data)[(out) & (mask)])
+/*
+ * __kfifo_peek_n internal helper function for determining the length of
+ * the next record in the fifo
+ */
+static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
+{
+	unsigned int l;
+	unsigned int mask = fifo->mask;
+	unsigned char *data = fifo->data;
+
+	l = __KFIFO_PEEK(data, fifo->out, mask);
+
+	if (--recsize)
+		l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
+
+	return l;
+}
+
+#define __KFIFO_POKE(data, in, mask, val) \
+	( \
+	(data)[(in) & (mask)] = (unsigned char)(val) \
+	)
+
+/*
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the record into the fifo
+ */
+static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
+{
+	unsigned int mask = fifo->mask;
+	unsigned char *data = fifo->data;
+
+	__KFIFO_POKE(data, fifo->in, mask, n);
+
+	if (recsize > 1)
+		__KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
+}
+
+unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
+{
+	return __kfifo_peek_n(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_len_r);
+
+unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
+		unsigned int len, size_t recsize)
+{
+	if (len + recsize > kfifo_unused(fifo))
+		return 0;
+
+	__kfifo_poke_n(fifo, len, recsize);
+
+	kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
+	fifo->in += len + recsize;
+	return len;
+}
+EXPORT_SYMBOL(__kfifo_in_r);
+
+static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
+	void *buf, unsigned int len, size_t recsize, unsigned int *n)
+{
+	*n = __kfifo_peek_n(fifo, recsize);
+
+	if (len > *n)
+		len = *n;
+
+	kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
+	return len;
+}
+
+unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
+		unsigned int len, size_t recsize)
+{
+	unsigned int n;
+
+	if (fifo->in == fifo->out)
+		return 0;
+
+	return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+}
+EXPORT_SYMBOL(__kfifo_out_peek_r);
+
+unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
+		unsigned int len, size_t recsize)
+{
+	unsigned int n;
+
+	if (fifo->in == fifo->out)
+		return 0;
+
+	len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+	fifo->out += n + recsize;
+	return len;
+}
+EXPORT_SYMBOL(__kfifo_out_r);
+
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+	unsigned int n;
+
+	n = __kfifo_peek_n(fifo, recsize);
+	fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
+int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
+	unsigned long len, unsigned int *copied, size_t recsize)
+{
+	unsigned long ret;
+
+	len = __kfifo_max_r(len, recsize);
+
+	if (len + recsize >
kfifo_unused(fifo)) { + *copied = 0; + return 0; + } + + __kfifo_poke_n(fifo, len, recsize); + + ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); + if (unlikely(ret)) { + *copied = 0; + return -EFAULT; + } + fifo->in += len + recsize; + return 0; +} +EXPORT_SYMBOL(__kfifo_from_user_r); + +int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, + unsigned long len, unsigned int *copied, size_t recsize) +{ + unsigned long ret; + unsigned int n; + + if (fifo->in == fifo->out) { + *copied = 0; + return 0; + } + + n = __kfifo_peek_n(fifo, recsize); + if (len > n) + len = n; + + ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied); + if (unlikely(ret)) { + *copied = 0; + return -EFAULT; + } + fifo->out += n + recsize; + return 0; +} +EXPORT_SYMBOL(__kfifo_to_user_r); + +unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) +{ + if (!nents) + BUG(); + + len = __kfifo_max_r(len, recsize); + + if (len + recsize > kfifo_unused(fifo)) + return 0; + + return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); +} +EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); + +void __kfifo_dma_in_finish_r(struct __kfifo *fifo, + unsigned int len, size_t recsize) +{ + len = __kfifo_max_r(len, recsize); + __kfifo_poke_n(fifo, len, recsize); + fifo->in += len + recsize; +} +EXPORT_SYMBOL(__kfifo_dma_in_finish_r); + +unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) +{ + if (!nents) + BUG(); + + len = __kfifo_max_r(len, recsize); + + if (len + recsize > fifo->in - fifo->out) + return 0; + + return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); +} +EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); + +void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) +{ + unsigned int len; + + len = __kfifo_peek_n(fifo, recsize); + fifo->out += len + recsize; +} +EXPORT_SYMBOL(__kfifo_dma_out_finish_r); diff --git a/lib/kobject.c b/lib/kobject.c index e07ee1fcd6f1..a65486613d79 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj) return kobj; } +static struct kobject *kobject_get_unless_zero(struct kobject *kobj) +{ + if (!kref_get_unless_zero(&kobj->kref)) + kobj = NULL; + return kobj; +} + /* * kobject_cleanup - free kobject resources. 
* @kobj: object to cleanup @@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name) list_for_each_entry(k, &kset->list, entry) { if (kobject_name(k) && !strcmp(kobject_name(k), name)) { - ret = kobject_get(k); + ret = kobject_get_unless_zero(k); break; } } diff --git a/lib/list_sort.c b/lib/list_sort.c index d7325c6b103f..1183fa70a44d 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -229,7 +229,7 @@ static int __init list_sort_test(void) goto exit; } /* force some equivalencies */ - el->value = random32() % (TEST_LIST_LEN/3); + el->value = prandom_u32() % (TEST_LIST_LEN / 3); el->serial = i; el->poison1 = TEST_POISON1; el->poison2 = TEST_POISON2; diff --git a/lib/lru_cache.c b/lib/lru_cache.c index d71d89498943..8335d39d2ccd 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c @@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, bool include_changing) { - struct hlist_node *n; struct lc_element *e; BUG_ON(!lc); BUG_ON(!lc->nr_elements); - hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { + hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) { /* "about to be changed" elements, pending transaction commit, * are hashed by their "new number". "Normal" elements have * lc_number == lc_new_number. */ diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index e764116ea12d..f0f7d7ca2b83 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile @@ -1,5 +1,5 @@ lzo_compress-objs := lzo1x_compress.o -lzo_decompress-objs := lzo1x_decompress.o +lzo_decompress-objs := lzo1x_decompress_safe.o obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index a6040990a62e..236eb21167b5 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -1,194 +1,243 @@ /* - * LZO1X Compressor from MiniLZO + * LZO1X Compressor from LZO * - * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> * * The full LZO package can be found at: * http://www.oberhumer.com/opensource/lzo/ * - * Changed for kernel use by: + * Changed for Linux kernel use by: * Nitin Gupta <nitingupta910@gmail.com> * Richard Purdie <rpurdie@openedhand.com> */ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/lzo.h> #include <asm/unaligned.h> +#include <linux/lzo.h> #include "lzodefs.h" static noinline size_t -_lzo1x_1_do_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, void *wrkmem) +lzo1x_1_do_compress(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + size_t ti, void *wrkmem) { + const unsigned char *ip; + unsigned char *op; const unsigned char * const in_end = in + in_len; - const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5; - const unsigned char ** const dict = wrkmem; - const unsigned char *ip = in, *ii = ip; - const unsigned char *end, *m, *m_pos; - size_t m_off, m_len, dindex; - unsigned char *op = out; + const unsigned char * const ip_end = in + in_len - 20; + const unsigned char *ii; + lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; - ip += 4; + op = out; + ip = in; + ii = ip; + ip += ti < 4 ? 
4 - ti : 0; for (;;) { - dindex = ((size_t)(0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK; - m_pos = dict[dindex]; - - if (m_pos < in) - goto literal; - - if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET)) - goto literal; - - m_off = ip - m_pos; - if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) - goto try_match; - - dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f); - m_pos = dict[dindex]; - - if (m_pos < in) - goto literal; - - if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET)) - goto literal; - - m_off = ip - m_pos; - if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3]) - goto try_match; - - goto literal; - -try_match: - if (get_unaligned((const unsigned short *)m_pos) - == get_unaligned((const unsigned short *)ip)) { - if (likely(m_pos[2] == ip[2])) - goto match; - } - + const unsigned char *m_pos; + size_t t, m_len, m_off; + u32 dv; literal: - dict[dindex] = ip; - ++ip; + ip += 1 + ((ip - ii) >> 5); +next: if (unlikely(ip >= ip_end)) break; - continue; - -match: - dict[dindex] = ip; - if (ip != ii) { - size_t t = ip - ii; + dv = get_unaligned_le32(ip); + t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; + m_pos = in + dict[t]; + dict[t] = (lzo_dict_t) (ip - in); + if (unlikely(dv != get_unaligned_le32(m_pos))) + goto literal; + ii -= ti; + ti = 0; + t = ip - ii; + if (t != 0) { if (t <= 3) { op[-2] |= t; - } else if (t <= 18) { + COPY4(op, ii); + op += t; + } else if (t <= 16) { *op++ = (t - 3); + COPY8(op, ii); + COPY8(op + 8, ii + 8); + op += t; } else { - size_t tt = t - 18; - - *op++ = 0; - while (tt > 255) { - tt -= 255; + if (t <= 18) { + *op++ = (t - 3); + } else { + size_t tt = t - 18; *op++ = 0; + while (unlikely(tt > 255)) { + tt -= 255; + *op++ = 0; + } + *op++ = tt; } - *op++ = tt; + do { + COPY8(op, ii); + COPY8(op + 8, ii + 8); + op += 16; + ii += 16; + t -= 16; + } while (t >= 16); + if (t > 0) do { + *op++ = *ii++; + } while (--t > 0); } - do { - *op++ = *ii++; - } while (--t > 0); } - ip += 3; - if (m_pos[3] != *ip++ || m_pos[4] != *ip++ - || m_pos[5] != *ip++ || m_pos[6] != *ip++ - || m_pos[7] != *ip++ || m_pos[8] != *ip++) { - --ip; - m_len = ip - ii; + m_len = 4; + { +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) + u64 v; + v = get_unaligned((const u64 *) (ip + m_len)) ^ + get_unaligned((const u64 *) (m_pos + m_len)); + if (unlikely(v == 0)) { + do { + m_len += 8; + v = get_unaligned((const u64 *) (ip + m_len)) ^ + get_unaligned((const u64 *) (m_pos + m_len)); + if (unlikely(ip + m_len >= ip_end)) + goto m_len_done; + } while (v == 0); + } +# if defined(__LITTLE_ENDIAN) + m_len += (unsigned) __builtin_ctzll(v) / 8; +# elif defined(__BIG_ENDIAN) + m_len += (unsigned) __builtin_clzll(v) / 8; +# else +# error "missing endian definition" +# endif +#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32) + u32 v; + v = get_unaligned((const u32 *) (ip + m_len)) ^ + get_unaligned((const u32 *) (m_pos + m_len)); + if (unlikely(v == 0)) { + do { + m_len += 4; + v = get_unaligned((const u32 *) (ip + m_len)) ^ + get_unaligned((const u32 *) (m_pos + m_len)); + if (v != 0) + break; + m_len += 4; + v = get_unaligned((const u32 *) (ip + m_len)) ^ + get_unaligned((const u32 *) (m_pos + m_len)); + if (unlikely(ip + m_len >= ip_end)) + goto m_len_done; + } while (v == 0); + } +# if defined(__LITTLE_ENDIAN) + m_len += (unsigned) __builtin_ctz(v) / 8; +# elif defined(__BIG_ENDIAN) + m_len += (unsigned) __builtin_clz(v) / 8; +# else +# error "missing endian definition" +# endif +#else + if (unlikely(ip[m_len] == 
m_pos[m_len])) { + do { + m_len += 1; + if (ip[m_len] != m_pos[m_len]) + break; + m_len += 1; + if (ip[m_len] != m_pos[m_len]) + break; + m_len += 1; + if (ip[m_len] != m_pos[m_len]) + break; + m_len += 1; + if (ip[m_len] != m_pos[m_len]) + break; + m_len += 1; + if (ip[m_len] != m_pos[m_len]) + break; + m_len += 1; + if (ip[m_len] != m_pos[m_len]) + break; + m_len += 1; + if (ip[m_len] != m_pos[m_len]) + break; + m_len += 1; + if (unlikely(ip + m_len >= ip_end)) + goto m_len_done; + } while (ip[m_len] == m_pos[m_len]); + } +#endif + } +m_len_done: - if (m_off <= M2_MAX_OFFSET) { - m_off -= 1; - *op++ = (((m_len - 1) << 5) - | ((m_off & 7) << 2)); - *op++ = (m_off >> 3); - } else if (m_off <= M3_MAX_OFFSET) { - m_off -= 1; + m_off = ip - m_pos; + ip += m_len; + ii = ip; + if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { + m_off -= 1; + *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); + *op++ = (m_off >> 3); + } else if (m_off <= M3_MAX_OFFSET) { + m_off -= 1; + if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); - goto m3_m4_offset; - } else { - m_off -= 0x4000; - - *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11) - | (m_len - 2)); - goto m3_m4_offset; + else { + m_len -= M3_MAX_LEN; + *op++ = M3_MARKER | 0; + while (unlikely(m_len > 255)) { + m_len -= 255; + *op++ = 0; + } + *op++ = (m_len); } + *op++ = (m_off << 2); + *op++ = (m_off >> 6); } else { - end = in_end; - m = m_pos + M2_MAX_LEN + 1; - - while (ip < end && *m == *ip) { - m++; - ip++; - } - m_len = ip - ii; - - if (m_off <= M3_MAX_OFFSET) { - m_off -= 1; - if (m_len <= 33) { - *op++ = (M3_MARKER | (m_len - 2)); - } else { - m_len -= 33; - *op++ = M3_MARKER | 0; - goto m3_m4_len; - } - } else { - m_off -= 0x4000; - if (m_len <= M4_MAX_LEN) { - *op++ = (M4_MARKER - | ((m_off & 0x4000) >> 11) + m_off -= 0x4000; + if (m_len <= M4_MAX_LEN) + *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); - } else { - m_len -= M4_MAX_LEN; - *op++ = (M4_MARKER - | ((m_off & 0x4000) >> 11)); -m3_m4_len: - while (m_len > 255) { - m_len -= 255; - *op++ = 0; - } - - *op++ = (m_len); + else { + m_len -= M4_MAX_LEN; + *op++ = (M4_MARKER | ((m_off >> 11) & 8)); + while (unlikely(m_len > 255)) { + m_len -= 255; + *op++ = 0; } + *op++ = (m_len); } -m3_m4_offset: - *op++ = ((m_off & 63) << 2); + *op++ = (m_off << 2); *op++ = (m_off >> 6); } - - ii = ip; - if (unlikely(ip >= ip_end)) - break; + goto next; } - *out_len = op - out; - return in_end - ii; + return in_end - (ii - ti); } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, - size_t *out_len, void *wrkmem) +int lzo1x_1_compress(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - const unsigned char *ii; + const unsigned char *ip = in; unsigned char *op = out; - size_t t; + size_t l = in_len; + size_t t = 0; - if (unlikely(in_len <= M2_MAX_LEN + 5)) { - t = in_len; - } else { - t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem); + while (l > 20) { + size_t ll = l <= (M4_MAX_OFFSET + 1) ? 
l : (M4_MAX_OFFSET + 1); + uintptr_t ll_end = (uintptr_t) ip + ll; + if ((ll_end + ((t + ll) >> 5)) <= ll_end) + break; + BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); + memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); + t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); + ip += ll; op += *out_len; + l -= ll; } + t += l; if (t > 0) { - ii = in + in_len - t; + const unsigned char *ii = in + in_len - t; if (op == out && t <= 238) { *op++ = (17 + t); @@ -198,16 +247,21 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, *op++ = (t - 3); } else { size_t tt = t - 18; - *op++ = 0; while (tt > 255) { tt -= 255; *op++ = 0; } - *op++ = tt; } - do { + if (t >= 16) do { + COPY8(op, ii); + COPY8(op + 8, ii + 8); + op += 16; + ii += 16; + t -= 16; + } while (t >= 16); + if (t > 0) do { *op++ = *ii++; } while (--t > 0); } @@ -223,4 +277,3 @@ EXPORT_SYMBOL_GPL(lzo1x_1_compress); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); - diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c deleted file mode 100644 index f2fd09850223..000000000000 --- a/lib/lzo/lzo1x_decompress.c +++ /dev/null @@ -1,255 +0,0 @@ -/* - * LZO1X Decompressor from MiniLZO - * - * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> - * - * The full LZO package can be found at: - * http://www.oberhumer.com/opensource/lzo/ - * - * Changed for kernel use by: - * Nitin Gupta <nitingupta910@gmail.com> - * Richard Purdie <rpurdie@openedhand.com> - */ - -#ifndef STATIC -#include <linux/module.h> -#include <linux/kernel.h> -#endif - -#include <asm/unaligned.h> -#include <linux/lzo.h> -#include "lzodefs.h" - -#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) -#define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x)) -#define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op) - -#define COPY4(dst, src) \ - put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) - -int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len) -{ - const unsigned char * const ip_end = in + in_len; - unsigned char * const op_end = out + *out_len; - const unsigned char *ip = in, *m_pos; - unsigned char *op = out; - size_t t; - - *out_len = 0; - - if (*ip > 17) { - t = *ip++ - 17; - if (t < 4) - goto match_next; - if (HAVE_OP(t, op_end, op)) - goto output_overrun; - if (HAVE_IP(t + 1, ip_end, ip)) - goto input_overrun; - do { - *op++ = *ip++; - } while (--t > 0); - goto first_literal_run; - } - - while ((ip < ip_end)) { - t = *ip++; - if (t >= 16) - goto match; - if (t == 0) { - if (HAVE_IP(1, ip_end, ip)) - goto input_overrun; - while (*ip == 0) { - t += 255; - ip++; - if (HAVE_IP(1, ip_end, ip)) - goto input_overrun; - } - t += 15 + *ip++; - } - if (HAVE_OP(t + 3, op_end, op)) - goto output_overrun; - if (HAVE_IP(t + 4, ip_end, ip)) - goto input_overrun; - - COPY4(op, ip); - op += 4; - ip += 4; - if (--t > 0) { - if (t >= 4) { - do { - COPY4(op, ip); - op += 4; - ip += 4; - t -= 4; - } while (t >= 4); - if (t > 0) { - do { - *op++ = *ip++; - } while (--t > 0); - } - } else { - do { - *op++ = *ip++; - } while (--t > 0); - } - } - -first_literal_run: - t = *ip++; - if (t >= 16) - goto match; - m_pos = op - (1 + M2_MAX_OFFSET); - m_pos -= t >> 2; - m_pos -= *ip++ << 2; - - if (HAVE_LB(m_pos, out, op)) - goto lookbehind_overrun; - - if (HAVE_OP(3, op_end, op)) - goto output_overrun; - *op++ = *m_pos++; - *op++ = *m_pos++; - *op++ = *m_pos; - - goto match_done; - - do { -match: - if (t >= 64) { - 
m_pos = op - 1; - m_pos -= (t >> 2) & 7; - m_pos -= *ip++ << 3; - t = (t >> 5) - 1; - if (HAVE_LB(m_pos, out, op)) - goto lookbehind_overrun; - if (HAVE_OP(t + 3 - 1, op_end, op)) - goto output_overrun; - goto copy_match; - } else if (t >= 32) { - t &= 31; - if (t == 0) { - if (HAVE_IP(1, ip_end, ip)) - goto input_overrun; - while (*ip == 0) { - t += 255; - ip++; - if (HAVE_IP(1, ip_end, ip)) - goto input_overrun; - } - t += 31 + *ip++; - } - m_pos = op - 1; - m_pos -= get_unaligned_le16(ip) >> 2; - ip += 2; - } else if (t >= 16) { - m_pos = op; - m_pos -= (t & 8) << 11; - - t &= 7; - if (t == 0) { - if (HAVE_IP(1, ip_end, ip)) - goto input_overrun; - while (*ip == 0) { - t += 255; - ip++; - if (HAVE_IP(1, ip_end, ip)) - goto input_overrun; - } - t += 7 + *ip++; - } - m_pos -= get_unaligned_le16(ip) >> 2; - ip += 2; - if (m_pos == op) - goto eof_found; - m_pos -= 0x4000; - } else { - m_pos = op - 1; - m_pos -= t >> 2; - m_pos -= *ip++ << 2; - - if (HAVE_LB(m_pos, out, op)) - goto lookbehind_overrun; - if (HAVE_OP(2, op_end, op)) - goto output_overrun; - - *op++ = *m_pos++; - *op++ = *m_pos; - goto match_done; - } - - if (HAVE_LB(m_pos, out, op)) - goto lookbehind_overrun; - if (HAVE_OP(t + 3 - 1, op_end, op)) - goto output_overrun; - - if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) { - COPY4(op, m_pos); - op += 4; - m_pos += 4; - t -= 4 - (3 - 1); - do { - COPY4(op, m_pos); - op += 4; - m_pos += 4; - t -= 4; - } while (t >= 4); - if (t > 0) - do { - *op++ = *m_pos++; - } while (--t > 0); - } else { -copy_match: - *op++ = *m_pos++; - *op++ = *m_pos++; - do { - *op++ = *m_pos++; - } while (--t > 0); - } -match_done: - t = ip[-2] & 3; - if (t == 0) - break; -match_next: - if (HAVE_OP(t, op_end, op)) - goto output_overrun; - if (HAVE_IP(t + 1, ip_end, ip)) - goto input_overrun; - - *op++ = *ip++; - if (t > 1) { - *op++ = *ip++; - if (t > 2) - *op++ = *ip++; - } - - t = *ip++; - } while (ip < ip_end); - } - - *out_len = op - out; - return LZO_E_EOF_NOT_FOUND; - -eof_found: - *out_len = op - out; - return (ip == ip_end ? LZO_E_OK : - (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN)); -input_overrun: - *out_len = op - out; - return LZO_E_INPUT_OVERRUN; - -output_overrun: - *out_len = op - out; - return LZO_E_OUTPUT_OVERRUN; - -lookbehind_overrun: - *out_len = op - out; - return LZO_E_LOOKBEHIND_OVERRUN; -} -#ifndef STATIC -EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("LZO1X Decompressor"); - -#endif diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c new file mode 100644 index 000000000000..569985d522d5 --- /dev/null +++ b/lib/lzo/lzo1x_decompress_safe.c @@ -0,0 +1,237 @@ +/* + * LZO1X Decompressor from LZO + * + * Copyright (C) 1996-2012 Markus F.X.J. 
Oberhumer <markus@oberhumer.com> + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for Linux kernel use by: + * Nitin Gupta <nitingupta910@gmail.com> + * Richard Purdie <rpurdie@openedhand.com> + */ + +#ifndef STATIC +#include <linux/module.h> +#include <linux/kernel.h> +#endif +#include <asm/unaligned.h> +#include <linux/lzo.h> +#include "lzodefs.h" + +#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) +#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) +#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun +#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun +#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun + +int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len) +{ + unsigned char *op; + const unsigned char *ip; + size_t t, next; + size_t state = 0; + const unsigned char *m_pos; + const unsigned char * const ip_end = in + in_len; + unsigned char * const op_end = out + *out_len; + + op = out; + ip = in; + + if (unlikely(in_len < 3)) + goto input_overrun; + if (*ip > 17) { + t = *ip++ - 17; + if (t < 4) { + next = t; + goto match_next; + } + goto copy_literal_run; + } + + for (;;) { + t = *ip++; + if (t < 16) { + if (likely(state == 0)) { + if (unlikely(t == 0)) { + while (unlikely(*ip == 0)) { + t += 255; + ip++; + NEED_IP(1); + } + t += 15 + *ip++; + } + t += 3; +copy_literal_run: +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { + const unsigned char *ie = ip + t; + unsigned char *oe = op + t; + do { + COPY8(op, ip); + op += 8; + ip += 8; + COPY8(op, ip); + op += 8; + ip += 8; + } while (ip < ie); + ip = ie; + op = oe; + } else +#endif + { + NEED_OP(t); + NEED_IP(t + 3); + do { + *op++ = *ip++; + } while (--t > 0); + } + state = 4; + continue; + } else if (state != 4) { + next = t & 3; + m_pos = op - 1; + m_pos -= t >> 2; + m_pos -= *ip++ << 2; + TEST_LB(m_pos); + NEED_OP(2); + op[0] = m_pos[0]; + op[1] = m_pos[1]; + op += 2; + goto match_next; + } else { + next = t & 3; + m_pos = op - (1 + M2_MAX_OFFSET); + m_pos -= t >> 2; + m_pos -= *ip++ << 2; + t = 3; + } + } else if (t >= 64) { + next = t & 3; + m_pos = op - 1; + m_pos -= (t >> 2) & 7; + m_pos -= *ip++ << 3; + t = (t >> 5) - 1 + (3 - 1); + } else if (t >= 32) { + t = (t & 31) + (3 - 1); + if (unlikely(t == 2)) { + while (unlikely(*ip == 0)) { + t += 255; + ip++; + NEED_IP(1); + } + t += 31 + *ip++; + NEED_IP(2); + } + m_pos = op - 1; + next = get_unaligned_le16(ip); + ip += 2; + m_pos -= next >> 2; + next &= 3; + } else { + m_pos = op; + m_pos -= (t & 8) << 11; + t = (t & 7) + (3 - 1); + if (unlikely(t == 2)) { + while (unlikely(*ip == 0)) { + t += 255; + ip++; + NEED_IP(1); + } + t += 7 + *ip++; + NEED_IP(2); + } + next = get_unaligned_le16(ip); + ip += 2; + m_pos -= next >> 2; + next &= 3; + if (m_pos == op) + goto eof_found; + m_pos -= 0x4000; + } + TEST_LB(m_pos); +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + if (op - m_pos >= 8) { + unsigned char *oe = op + t; + if (likely(HAVE_OP(t + 15))) { + do { + COPY8(op, m_pos); + op += 8; + m_pos += 8; + COPY8(op, m_pos); + op += 8; + m_pos += 8; + } while (op < oe); + op = oe; + if (HAVE_IP(6)) { + state = next; + COPY4(op, ip); + op += next; + ip += next; + continue; + } + } else { + NEED_OP(t); + do { + *op++ = *m_pos++; + } while (op < oe); + } + } else +#endif + { + unsigned char *oe = op + t; + NEED_OP(t); + op[0] = m_pos[0]; + op[1] = m_pos[1]; + op += 2; + m_pos += 2; + 
do { + *op++ = *m_pos++; + } while (op < oe); + } +match_next: + state = next; + t = next; +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + if (likely(HAVE_IP(6) && HAVE_OP(4))) { + COPY4(op, ip); + op += t; + ip += t; + } else +#endif + { + NEED_IP(t + 3); + NEED_OP(t); + while (t > 0) { + *op++ = *ip++; + t--; + } + } + } + +eof_found: + *out_len = op - out; + return (t != 3 ? LZO_E_ERROR : + ip == ip_end ? LZO_E_OK : + ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); + +input_overrun: + *out_len = op - out; + return LZO_E_INPUT_OVERRUN; + +output_overrun: + *out_len = op - out; + return LZO_E_OUTPUT_OVERRUN; + +lookbehind_overrun: + *out_len = op - out; + return LZO_E_LOOKBEHIND_OVERRUN; +} +#ifndef STATIC +EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZO1X Decompressor"); + +#endif diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h index b6d482c492ef..6710b83ce72e 100644 --- a/lib/lzo/lzodefs.h +++ b/lib/lzo/lzodefs.h @@ -1,19 +1,37 @@ /* * lzodefs.h -- architecture, OS and compiler specific defines * - * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com> + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> * * The full LZO package can be found at: * http://www.oberhumer.com/opensource/lzo/ * - * Changed for kernel use by: + * Changed for Linux kernel use by: * Nitin Gupta <nitingupta910@gmail.com> * Richard Purdie <rpurdie@openedhand.com> */ -#define LZO_VERSION 0x2020 -#define LZO_VERSION_STRING "2.02" -#define LZO_VERSION_DATE "Oct 17 2005" + +#define COPY4(dst, src) \ + put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst)) +#if defined(__x86_64__) +#define COPY8(dst, src) \ + put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst)) +#else +#define COPY8(dst, src) \ + COPY4(dst, src); COPY4((dst) + 4, (src) + 4) +#endif + +#if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) +#error "conflicting endian definitions" +#elif defined(__x86_64__) +#define LZO_USE_CTZ64 1 +#define LZO_USE_CTZ32 1 +#elif defined(__i386__) || defined(__powerpc__) +#define LZO_USE_CTZ32 1 +#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5) +#define LZO_USE_CTZ32 1 +#endif #define M1_MAX_OFFSET 0x0400 #define M2_MAX_OFFSET 0x0800 @@ -34,10 +52,8 @@ #define M3_MARKER 32 #define M4_MARKER 16 -#define D_BITS 14 -#define D_MASK ((1u << D_BITS) - 1) +#define lzo_dict_t unsigned short +#define D_BITS 13 +#define D_SIZE (1u << D_BITS) +#define D_MASK (D_SIZE - 1) #define D_HIGH ((D_MASK >> 1) + 1) - -#define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \ - << (s1)) ^ (p)[0]) -#define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0]) diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c index 44b92cb6224f..eb4a04afea80 100644 --- a/lib/notifier-error-inject.c +++ b/lib/notifier-error-inject.c @@ -17,7 +17,7 @@ static int debugfs_errno_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set, "%lld\n"); -static struct dentry *debugfs_create_errno(const char *name, mode_t mode, +static struct dentry *debugfs_create_errno(const char *name, umode_t mode, struct dentry *parent, int *value) { return debugfs_create_file(name, mode, parent, value, &fops_errno); @@ -50,7 +50,7 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent, struct notifier_err_inject *err_inject, int priority) { struct notifier_err_inject_action *action; - mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; + umode_t mode = S_IFREG | 
S_IRUSR | S_IWUSR; struct dentry *dir; struct dentry *actions_dir; diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index af38aedbd874..122f02f9941b 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -117,8 +117,7 @@ static int black_path_count(struct rb_node *rb) static void check(int nr_nodes) { struct rb_node *rb; - int count = 0; - int blacks = 0; + int count = 0, blacks = 0; u32 prev_key = 0; for (rb = rb_first(&root); rb; rb = rb_next(rb)) { @@ -134,7 +133,9 @@ static void check(int nr_nodes) prev_key = node->key; count++; } + WARN_ON_ONCE(count != nr_nodes); + WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); } static void check_augmented(int nr_nodes) @@ -148,7 +149,7 @@ static void check_augmented(int nr_nodes) } } -static int rbtree_test_init(void) +static int __init rbtree_test_init(void) { int i, j; cycles_t time1, time2, time; @@ -221,7 +222,7 @@ static int rbtree_test_init(void) return -EAGAIN; /* Fail will directly unload the module */ } -static void rbtree_test_exit(void) +static void __exit rbtree_test_exit(void) { printk(KERN_ALERT "test exit\n"); } diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 7874b01e816e..b83c144d731f 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, } EXPORT_SYMBOL(sg_alloc_table_from_pages); +void __sg_page_iter_start(struct sg_page_iter *piter, + struct scatterlist *sglist, unsigned int nents, + unsigned long pgoffset) +{ + piter->__pg_advance = 0; + piter->__nents = nents; + + piter->page = NULL; + piter->sg = sglist; + piter->sg_pgoffset = pgoffset; +} +EXPORT_SYMBOL(__sg_page_iter_start); + +static int sg_page_count(struct scatterlist *sg) +{ + return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; +} + +bool __sg_page_iter_next(struct sg_page_iter *piter) +{ + if (!piter->__nents || !piter->sg) + return false; + + piter->sg_pgoffset += piter->__pg_advance; + piter->__pg_advance = 1; + + while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { + piter->sg_pgoffset -= sg_page_count(piter->sg); + piter->sg = sg_next(piter->sg); + if (!--piter->__nents || !piter->sg) + return false; + } + piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset); + + return true; +} +EXPORT_SYMBOL(__sg_page_iter_next); + /** * sg_miter_start - start mapping iteration over a sg list * @miter: sg mapping iter to be started @@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, { memset(miter, 0, sizeof(struct sg_mapping_iter)); - miter->__sg = sgl; - miter->__nents = nents; - miter->__offset = 0; + __sg_page_iter_start(&miter->piter, sgl, nents, 0); WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); miter->__flags = flags; } @@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start); */ bool sg_miter_next(struct sg_mapping_iter *miter) { - unsigned int off, len; - - /* check for end and drop resources from the last iteration */ - if (!miter->__nents) - return false; - sg_miter_stop(miter); - /* get to the next sg if necessary. __offset is adjusted by stop */ - while (miter->__offset == miter->__sg->length) { - if (--miter->__nents) { - miter->__sg = sg_next(miter->__sg); - miter->__offset = 0; - } else + /* + * Get to the next page if necessary. 
+ __remaining, __offset are adjusted by sg_miter_stop + */ + if (!miter->__remaining) { + struct scatterlist *sg; + unsigned long pgoffset; + + if (!__sg_page_iter_next(&miter->piter)) return false; - } - /* map the next page */ - off = miter->__sg->offset + miter->__offset; - len = miter->__sg->length - miter->__offset; + sg = miter->piter.sg; + pgoffset = miter->piter.sg_pgoffset; - miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); - off &= ~PAGE_MASK; - miter->length = min_t(unsigned int, len, PAGE_SIZE - off); - miter->consumed = miter->length; + miter->__offset = pgoffset ? 0 : sg->offset; + miter->__remaining = sg->offset + sg->length - + (pgoffset << PAGE_SHIFT) - miter->__offset; + miter->__remaining = min_t(unsigned long, miter->__remaining, + PAGE_SIZE - miter->__offset); + } + miter->page = miter->piter.page; + miter->consumed = miter->length = miter->__remaining; if (miter->__flags & SG_MITER_ATOMIC) - miter->addr = kmap_atomic(miter->page) + off; + miter->addr = kmap_atomic(miter->page) + miter->__offset; else - miter->addr = kmap(miter->page) + off; + miter->addr = kmap(miter->page) + miter->__offset; return true; } @@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) /* drop resources from the last iteration */ if (miter->addr) { miter->__offset += miter->consumed; + miter->__remaining -= miter->consumed; if (miter->__flags & SG_MITER_TO_SG) flush_kernel_dcache_page(miter->page); diff --git a/lib/show_mem.c b/lib/show_mem.c index 4407f8c9b1f7..b7c72311ad0c 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -18,6 +18,9 @@ void show_mem(unsigned int filter) printk("Mem-Info:\n"); show_free_areas(filter); + if (filter & SHOW_MEM_FILTER_PAGE_COUNT) + return; + for_each_online_pgdat(pgdat) { unsigned long i, flags; diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 1cffc223bff5..ed5c1454dd62 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -2,10 +2,12 @@ * Helpers for formatting and printing strings * * Copyright 31 August 2008 James Bottomley + * Copyright (C) 2013, Intel Corporation */ #include <linux/kernel.h> #include <linux/math64.h> #include <linux/export.h> +#include <linux/ctype.h> #include <linux/string_helpers.h> /** @@ -66,3 +68,134 @@ int string_get_size(u64 size, const enum string_size_units units, return 0; } EXPORT_SYMBOL(string_get_size); + +static bool unescape_space(char **src, char **dst) +{ + char *p = *dst, *q = *src; + + switch (*q) { + case 'n': + *p = '\n'; + break; + case 'r': + *p = '\r'; + break; + case 't': + *p = '\t'; + break; + case 'v': + *p = '\v'; + break; + case 'f': + *p = '\f'; + break; + default: + return false; + } + *dst += 1; + *src += 1; + return true; +} + +static bool unescape_octal(char **src, char **dst) +{ + char *p = *dst, *q = *src; + u8 num; + + if (isodigit(*q) == 0) + return false; + + num = (*q++) & 7; + while (num < 32 && isodigit(*q) && (q - *src < 3)) { + num <<= 3; + num += (*q++) & 7; + } + *p = num; + *dst += 1; + *src = q; + return true; +} + +static bool unescape_hex(char **src, char **dst) +{ + char *p = *dst, *q = *src; + int digit; + u8 num; + + if (*q++ != 'x') + return false; + + num = digit = hex_to_bin(*q++); + if (digit < 0) + return false; + + digit = hex_to_bin(*q); + if (digit >= 0) { + q++; + num = (num << 4) | digit; + } + *p = num; + *dst += 1; + *src = q; + return true; +} + +static bool unescape_special(char **src, char **dst) +{ + char *p = *dst, *q = *src; + + switch (*q) { + case '\"': + *p = '\"'; + break; + case '\\': + *p = '\\'; + break; 
+ case 'a': + *p = '\a'; + break; + case 'e': + *p = '\e'; + break; + default: + return false; + } + *dst += 1; + *src += 1; + return true; +} + +int string_unescape(char *src, char *dst, size_t size, unsigned int flags) +{ + char *out = dst; + + while (*src && --size) { + if (src[0] == '\\' && src[1] != '\0' && size > 1) { + src++; + size--; + + if (flags & UNESCAPE_SPACE && + unescape_space(&src, &out)) + continue; + + if (flags & UNESCAPE_OCTAL && + unescape_octal(&src, &out)) + continue; + + if (flags & UNESCAPE_HEX && + unescape_hex(&src, &out)) + continue; + + if (flags & UNESCAPE_SPECIAL && + unescape_special(&src, &out)) + continue; + + *out++ = '\\'; + } + *out++ = *src++; + } + *out = '\0'; + + return out - dst; +} +EXPORT_SYMBOL(string_unescape); diff --git a/lib/swiotlb.c b/lib/swiotlb.c index bfe02b8fc55b..d23762e6652c 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str) if (!strcmp(str, "force")) swiotlb_force = 1; - return 1; + return 0; } -__setup("swiotlb=", setup_io_tlb_npages); +early_param("swiotlb", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ unsigned long swiotlb_nr_tbl(void) @@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void) return io_tlb_nslabs; } EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); + +/* default to 64MB */ +#define IO_TLB_DEFAULT_SIZE (64UL<<20) +unsigned long swiotlb_size_or_default(void) +{ + unsigned long size; + + size = io_tlb_nslabs << IO_TLB_SHIFT; + + return size ? size : (IO_TLB_DEFAULT_SIZE); +} + /* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) @@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) void __init swiotlb_init(int verbose) { - /* default to 64MB */ - size_t default_size = 64UL<<20; + size_t default_size = IO_TLB_DEFAULT_SIZE; unsigned char *vstart; unsigned long bytes; diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c new file mode 100644 index 000000000000..6ac48de04c0e --- /dev/null +++ b/lib/test-string_helpers.c @@ -0,0 +1,103 @@ +/* + * Test cases for lib/string_helpers.c module. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/string.h> +#include <linux/string_helpers.h> + +struct test_string { + const char *in; + const char *out; + unsigned int flags; +}; + +static const struct test_string strings[] __initconst = { + { + .in = "\\f\\ \\n\\r\\t\\v", + .out = "\f\\ \n\r\t\v", + .flags = UNESCAPE_SPACE, + }, + { + .in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777", + .out = " \001\00387\0064\005 \\8aH?7", + .flags = UNESCAPE_OCTAL, + }, + { + .in = "\\xv\\xa\\x2c\\xD\\x6f2", + .out = "\\xv\n,\ro2", + .flags = UNESCAPE_HEX, + }, + { + .in = "\\h\\\\\\\"\\a\\e\\", + .out = "\\h\\\"\a\e\\", + .flags = UNESCAPE_SPECIAL, + }, +}; + +static void __init test_string_unescape(unsigned int flags, bool inplace) +{ + char in[256]; + char out_test[256]; + char out_real[256]; + int i, p = 0, q_test = 0, q_real = sizeof(out_real); + + for (i = 0; i < ARRAY_SIZE(strings); i++) { + const char *s = strings[i].in; + int len = strlen(strings[i].in); + + /* Copy string to in buffer */ + memcpy(&in[p], s, len); + p += len; + + /* Copy expected result for given flags */ + if (flags & strings[i].flags) { + s = strings[i].out; + len = strlen(strings[i].out); + } + memcpy(&out_test[q_test], s, len); + q_test += len; + } + in[p++] = '\0'; + + /* Call string_unescape and compare result */ + if (inplace) { + memcpy(out_real, in, p); + if (flags == UNESCAPE_ANY) + q_real = string_unescape_any_inplace(out_real); + else + q_real = string_unescape_inplace(out_real, flags); + } else if (flags == UNESCAPE_ANY) { + q_real = string_unescape_any(in, out_real, q_real); + } else { + q_real = string_unescape(in, out_real, q_real, flags); + } + + if (q_real != q_test || memcmp(out_test, out_real, q_test)) { + pr_warn("Test failed: flags = %u\n", flags); + print_hex_dump(KERN_WARNING, "Input: ", + DUMP_PREFIX_NONE, 16, 1, in, p - 1, true); + print_hex_dump(KERN_WARNING, "Expected: ", + DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true); + print_hex_dump(KERN_WARNING, "Got: ", + DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true); + } +} + +static int __init test_string_helpers_init(void) +{ + unsigned int i; + + pr_info("Running tests...\n"); + for (i = 0; i < UNESCAPE_ANY + 1; i++) + test_string_unescape(i, false); + test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true); + + return -EINVAL; +} +module_init(test_string_helpers_init); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c new file mode 100644 index 000000000000..6f500ef2301d --- /dev/null +++ b/lib/ucs2_string.c @@ -0,0 +1,51 @@ +#include <linux/ucs2_string.h> +#include <linux/module.h> + +/* Return the number of unicode characters in data */ +unsigned long +ucs2_strnlen(const ucs2_char_t *s, size_t maxlength) +{ + unsigned long length = 0; + + while (*s++ != 0 && length < maxlength) + length++; + return length; +} +EXPORT_SYMBOL(ucs2_strnlen); + +unsigned long +ucs2_strlen(const ucs2_char_t *s) +{ + return ucs2_strnlen(s, ~0UL); +} +EXPORT_SYMBOL(ucs2_strlen); + +/* + * Return the length of this string in bytes + * Note: this is NOT the same as the number of unicode characters + */ +unsigned long +ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength) +{ + return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t); +} +EXPORT_SYMBOL(ucs2_strsize); + +int +ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) +{ + while (1) { + 
if (len == 0) + return 0; + if (*a < *b) + return -1; + if (*a > *b) + return 1; + if (*a == 0) /* implies *b == 0 */ + return 0; + a++; + b++; + len--; + } +} +EXPORT_SYMBOL(ucs2_strncmp); diff --git a/lib/usercopy.c b/lib/usercopy.c new file mode 100644 index 000000000000..4f5b1ddbcd25 --- /dev/null +++ b/lib/usercopy.c @@ -0,0 +1,9 @@ +#include <linux/export.h> +#include <linux/bug.h> +#include <linux/uaccess.h> + +void copy_from_user_overflow(void) +{ + WARN(1, "Buffer overflow detected!\n"); +} +EXPORT_SYMBOL(copy_from_user_overflow); diff --git a/lib/uuid.c b/lib/uuid.c index 52a6fe6387de..398821e4dce1 100644 --- a/lib/uuid.c +++ b/lib/uuid.c @@ -25,13 +25,7 @@ static void __uuid_gen_common(__u8 b[16]) { - int i; - u32 r; - - for (i = 0; i < 4; i++) { - r = random32(); - memcpy(b + i * 4, &r, 4); - } + prandom_bytes(b, 16); /* reversion 0b10 */ b[8] = (b[8] & 0x3F) | 0x80; } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 0d62fd700f68..e149c6416384 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -534,14 +534,21 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec) static noinline_for_stack char *symbol_string(char *buf, char *end, void *ptr, - struct printf_spec spec, char ext) + struct printf_spec spec, const char *fmt) { - unsigned long value = (unsigned long) ptr; + unsigned long value; #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; - if (ext == 'B') +#endif + + if (fmt[1] == 'R') + ptr = __builtin_extract_return_addr(ptr); + value = (unsigned long)ptr; + +#ifdef CONFIG_KALLSYMS + if (*fmt == 'B') sprint_backtrace(sym, value); - else if (ext != 'f' && ext != 's') + else if (*fmt != 'f' && *fmt != 's') sprint_symbol(sym, value); else sprint_symbol_no_offset(sym, value); @@ -987,6 +994,7 @@ int kptr_restrict __read_mostly; * - 'f' For simple symbolic function names without offset * - 'S' For symbolic direct pointers with offset * - 's' For symbolic direct pointers without offset + * - '[FfSs]R' as above with __builtin_extract_return_addr() translation * - 'B' For backtraced symbolic direct pointers with offset * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] @@ -1060,7 +1068,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'S': case 's': case 'B': - return symbol_string(buf, end, ptr, spec, *fmt); + return symbol_string(buf, end, ptr, spec, fmt); case 'R': case 'r': return resource_string(buf, end, ptr, spec, fmt); diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig index 82a04d7ba99e..08837db52d94 100644 --- a/lib/xz/Kconfig +++ b/lib/xz/Kconfig @@ -15,7 +15,7 @@ config XZ_DEC_X86 config XZ_DEC_POWERPC bool "PowerPC BCJ filter decoder" - default y if POWERPC + default y if PPC select XZ_DEC_BCJ config XZ_DEC_IA64 |
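
As a usage illustration for the string_helpers API introduced in this series, the following is a minimal caller sketch. It is hypothetical and not part of the patch: the module name, the sample buffer contents, and the printed message are assumptions made for the example; only string_unescape_any_inplace() and its in-place contract come from the code above.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string_helpers.h>

/* Hypothetical demo module, not part of this patch: exercises the new
 * unescape helpers on a buffer holding literal escape sequences. */
static int __init unescape_demo_init(void)
{
	/* Escapes are stored literally here ("\n" is backslash + 'n'). */
	static char buf[] = "line1\\nline2\\t\\x21";
	int len;

	/* UNESCAPE_ANY rewrites \n, \t and \x21 ('!') in a single pass. */
	len = string_unescape_any_inplace(buf);
	pr_info("unescaped %d bytes: %s\n", len, buf);
	return 0;
}

static void __exit unescape_demo_exit(void)
{
}

module_init(unescape_demo_init);
module_exit(unescape_demo_exit);
MODULE_LICENSE("GPL");

The in-place variants are safe because each unescape_*() helper consumes at least as many source bytes as it emits, so the output can only shrink and src may alias dst. Likewise, the new %p[FfSs]R extension in vsprintf.c pairs naturally with __builtin_return_address(): for example, printk("%pSR\n", __builtin_return_address(0)) prints the caller as symbol+offset on CONFIG_KALLSYMS kernels, applying __builtin_extract_return_addr() first on architectures that encode return addresses.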