author     Linus Torvalds <torvalds@linux-foundation.org>   2012-12-18 05:58:12 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-18 05:58:12 +0100
commit     848b81415c42ff3dc9a4204749087b015c37ef66 (patch)
tree       391da3a73aea48632248220d2d6b8d45a88f7eae /lib
parent     efi: Fix the build with user namespaces enabled. (diff)
parent     scatterlist: don't BUG when we can trivially return a proper error. (diff)
download   linux-848b81415c42ff3dc9a4204749087b015c37ef66.tar.xz
           linux-848b81415c42ff3dc9a4204749087b015c37ef66.zip
Merge branch 'akpm' (Andrew's patch-bomb)
Merge misc patches from Andrew Morton:
"Incoming:
- lots of misc stuff
- backlight tree updates
- lib/ updates
- Oleg's percpu-rwsem changes
- checkpatch
- rtc
- aoe
- more checkpoint/restart support
I still have a pile of MM stuff pending - Pekka should be merging
later today after which that is good to go. A number of other things
are twiddling thumbs awaiting maintainer merges."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (180 commits)
scatterlist: don't BUG when we can trivially return a proper error.
docs: update documentation about /proc/<pid>/fdinfo/<fd> fanotify output
fs, fanotify: add @mflags field to fanotify output
docs: add documentation about /proc/<pid>/fdinfo/<fd> output
fs, notify: add procfs fdinfo helper
fs, exportfs: add exportfs_encode_inode_fh() helper
fs, exportfs: escape nil dereference if no s_export_op present
fs, epoll: add procfs fdinfo helper
fs, eventfd: add procfs fdinfo helper
procfs: add ability to plug in auxiliary fdinfo providers
tools/testing/selftests/kcmp/kcmp_test.c: print reason for failure in kcmp_test
breakpoint selftests: print failure status instead of cause make error
kcmp selftests: print fail status instead of cause make error
kcmp selftests: make run_tests fix
mem-hotplug selftests: print failure status instead of cause make error
cpu-hotplug selftests: print failure status instead of cause make error
mqueue selftests: print failure status instead of cause make error
vm selftests: print failure status instead of cause make error
ubifs: use prandom_bytes
mtd: nandsim: use prandom_bytes
...
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                      3
-rw-r--r--   lib/Makefile                     1
-rw-r--r--   lib/dynamic_debug.c              9
-rw-r--r--   lib/interval_tree_test_main.c    7
-rw-r--r--   lib/kstrtox.c                   64
-rw-r--r--   lib/percpu-rwsem.c             165
-rw-r--r--   lib/random32.c                  97
-rw-r--r--   lib/rbtree_test.c                8
-rw-r--r--   lib/scatterlist.c                3
-rw-r--r--   lib/vsprintf.c                 109
10 files changed, 382 insertions, 84 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 4b31a46fb307..75cdb77fa49d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -42,6 +42,9 @@ config GENERIC_IO
 config STMP_DEVICE
 	bool
 
+config PERCPU_RWSEM
+	boolean
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index e2152fa7ff4d..5558e35170cd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e7f7d993357a..1db1fc660538 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -62,13 +62,6 @@ static LIST_HEAD(ddebug_tables);
 static int verbose = 0;
 module_param(verbose, int, 0644);
 
-/* Return the last part of a pathname */
-static inline const char *basename(const char *path)
-{
-	const char *tail = strrchr(path, '/');
-	return tail ? tail+1 : path;
-}
-
 /* Return the path relative to source root */
 static inline const char *trim_prefix(const char *path)
 {
@@ -154,7 +147,7 @@ static int ddebug_change(const struct ddebug_query *query,
 
 		/* match against the source filename */
 		if (query->filename &&
 		    strcmp(query->filename, dp->filename) &&
-		    strcmp(query->filename, basename(dp->filename)) &&
+		    strcmp(query->filename, kbasename(dp->filename)) &&
 		    strcmp(query->filename, trim_prefix(dp->filename)))
 			continue;
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
index b25903987f7a..245900b98c8e 100644
--- a/lib/interval_tree_test_main.c
+++ b/lib/interval_tree_test_main.c
@@ -30,7 +30,8 @@ static void init(void)
 {
 	int i;
 	for (i = 0; i < NODES; i++) {
-		u32 a = prandom32(&rnd), b = prandom32(&rnd);
+		u32 a = prandom_u32_state(&rnd);
+		u32 b = prandom_u32_state(&rnd);
 		if (a <= b) {
 			nodes[i].start = a;
 			nodes[i].last = b;
@@ -40,7 +41,7 @@ static void init(void)
 		}
 	}
 	for (i = 0; i < SEARCHES; i++)
-		queries[i] = prandom32(&rnd);
+		queries[i] = prandom_u32_state(&rnd);
 }
 
 static int interval_tree_test_init(void)
@@ -51,7 +52,7 @@ static int interval_tree_test_init(void)
 
 	printk(KERN_ALERT "interval tree insert/remove");
 
-	prandom32_seed(&rnd, 3141592653589793238ULL);
+	prandom_seed_state(&rnd, 3141592653589793238ULL);
 	init();
 
 	time1 = get_cycles();
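For reference, a small sketch of what the kbasename() helper from <linux/string.h> returns; this is what the dynamic_debug hunk above now matches the query filename against. The demo function is invented for illustration and is not part of the patch:

#include <linux/kernel.h>
#include <linux/string.h>

/* kbasename() yields the component after the last '/', or the whole
 * string when no '/' is present. */
static void kbasename_demo(void)
{
	pr_info("%s\n", kbasename("lib/dynamic_debug.c"));	/* "dynamic_debug.c" */
	pr_info("%s\n", kbasename("vsprintf.c"));		/* "vsprintf.c" */
}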
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index c3615eab0cc3..f78ae0c0c4e2 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -104,6 +104,22 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 	return 0;
 }
 
+/**
+ * kstrtoull - convert a string to an unsigned long long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ *  include a single newline before its terminating null. The first character
+ *  may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ *  given as 0, then the base of the string is automatically detected with the
+ *  conventional semantics - If it begins with 0x the number will be parsed as a
+ *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ *  parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
 int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 {
 	if (s[0] == '+')
@@ -112,6 +128,22 @@ int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 }
 EXPORT_SYMBOL(kstrtoull);
 
+/**
+ * kstrtoll - convert a string to a long long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ *  include a single newline before its terminating null. The first character
+ *  may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ *  given as 0, then the base of the string is automatically detected with the
+ *  conventional semantics - If it begins with 0x the number will be parsed as a
+ *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ *  parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
 int kstrtoll(const char *s, unsigned int base, long long *res)
 {
 	unsigned long long tmp;
@@ -168,6 +200,22 @@ int _kstrtol(const char *s, unsigned int base, long *res)
 }
 EXPORT_SYMBOL(_kstrtol);
 
+/**
+ * kstrtouint - convert a string to an unsigned int
+ * @s: The start of the string. The string must be null-terminated, and may also
+ *  include a single newline before its terminating null. The first character
+ *  may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ *  given as 0, then the base of the string is automatically detected with the
+ *  conventional semantics - If it begins with 0x the number will be parsed as a
+ *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ *  parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
 int kstrtouint(const char *s, unsigned int base, unsigned int *res)
 {
 	unsigned long long tmp;
@@ -183,6 +231,22 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
 }
 EXPORT_SYMBOL(kstrtouint);
 
+/**
+ * kstrtoint - convert a string to an int
+ * @s: The start of the string. The string must be null-terminated, and may also
+ *  include a single newline before its terminating null. The first character
+ *  may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ *  given as 0, then the base of the string is automatically detected with the
+ *  conventional semantics - If it begins with 0x the number will be parsed as a
+ *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ *  parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
 int kstrtoint(const char *s, unsigned int base, int *res)
 {
 	long long tmp;
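As an illustration of the calling convention these new comments document, a hedged sketch of a caller; the helper name and its surrounding context are invented, only kstrtoull() itself is the kernel API:

#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical: parse a value handed in from userspace (e.g. a sysfs
 * store method). Base 0 auto-detects decimal, octal (leading 0) or
 * hex (leading 0x). */
static int parse_threshold(const char *buf, unsigned long long *out)
{
	int ret;

	ret = kstrtoull(buf, 0, out);
	if (ret)
		return ret;	/* -EINVAL on malformed input, -ERANGE on overflow */

	return 0;
}

Unlike the obsolete simple_strtoull(), the result is only written on success, so the return code genuinely has to be checked.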
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
new file mode 100644
index 000000000000..652a8ee8efe9
--- /dev/null
+++ b/lib/percpu-rwsem.c
@@ -0,0 +1,165 @@
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/percpu.h>
+#include <linux/wait.h>
+#include <linux/lockdep.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+
+int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+			const char *name, struct lock_class_key *rwsem_key)
+{
+	brw->fast_read_ctr = alloc_percpu(int);
+	if (unlikely(!brw->fast_read_ctr))
+		return -ENOMEM;
+
+	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
+	__init_rwsem(&brw->rw_sem, name, rwsem_key);
+	atomic_set(&brw->write_ctr, 0);
+	atomic_set(&brw->slow_read_ctr, 0);
+	init_waitqueue_head(&brw->write_waitq);
+	return 0;
+}
+
+void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
+{
+	free_percpu(brw->fast_read_ctr);
+	brw->fast_read_ctr = NULL; /* catch use after free bugs */
+}
+
+/*
+ * This is the fast-path for down_read/up_read, it only needs to ensure
+ * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
+ * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
+ * serialize with the preempt-disabled section below.
+ *
+ * The nontrivial part is that we should guarantee acquire/release semantics
+ * in case when
+ *
+ *	R_W: down_write() comes after up_read(), the writer should see all
+ *	     changes done by the reader
+ * or
+ *	W_R: down_read() comes after up_write(), the reader should see all
+ *	     changes done by the writer
+ *
+ * If this helper fails the callers rely on the normal rw_semaphore and
+ * atomic_dec_and_test(), so in this case we have the necessary barriers.
+ *
+ * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
+ * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
+ * reader inside the critical section. See the comments in down_write and
+ * up_write below.
+ */
+static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
+{
+	bool success = false;
+
+	preempt_disable();
+	if (likely(!atomic_read(&brw->write_ctr))) {
+		__this_cpu_add(*brw->fast_read_ctr, val);
+		success = true;
+	}
+	preempt_enable();
+
+	return success;
+}
+
+/*
+ * Like the normal down_read() this is not recursive, the writer can
+ * come after the first percpu_down_read() and create the deadlock.
+ *
+ * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
+ * percpu_up_read() does rwsem_release(). This pairs with the usage
+ * of ->rw_sem in percpu_down/up_write().
+ */
+void percpu_down_read(struct percpu_rw_semaphore *brw)
+{
+	might_sleep();
+	if (likely(update_fast_ctr(brw, +1))) {
+		rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+		return;
+	}
+
+	down_read(&brw->rw_sem);
+	atomic_inc(&brw->slow_read_ctr);
+	/* avoid up_read()->rwsem_release() */
+	__up_read(&brw->rw_sem);
+}
+
+void percpu_up_read(struct percpu_rw_semaphore *brw)
+{
+	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
+
+	if (likely(update_fast_ctr(brw, -1)))
+		return;
+
+	/* false-positive is possible but harmless */
+	if (atomic_dec_and_test(&brw->slow_read_ctr))
+		wake_up_all(&brw->write_waitq);
+}
+
+static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
+{
+	unsigned int sum = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		sum += per_cpu(*brw->fast_read_ctr, cpu);
+		per_cpu(*brw->fast_read_ctr, cpu) = 0;
+	}
+
+	return sum;
+}
+
+/*
+ * A writer increments ->write_ctr to force the readers to switch to the
+ * slow mode, note the atomic_read() check in update_fast_ctr().
+ *
+ * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
+ * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
+ * counter it represents the number of active readers.
+ *
+ * Finally the writer takes ->rw_sem for writing and blocks the new readers,
+ * then waits until the slow counter becomes zero.
+ */
+void percpu_down_write(struct percpu_rw_semaphore *brw)
+{
+	/* tell update_fast_ctr() there is a pending writer */
+	atomic_inc(&brw->write_ctr);
+	/*
+	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
+	 *    so that update_fast_ctr() can't succeed.
+	 *
+	 * 2. Ensures we see the result of every previous this_cpu_add() in
+	 *    update_fast_ctr().
+	 *
+	 * 3. Ensures that if any reader has exited its critical section via
+	 *    fast-path, it executes a full memory barrier before we return.
+	 *    See R_W case in the comment above update_fast_ctr().
+	 */
+	synchronize_sched_expedited();
+
+	/* exclude other writers, and block the new readers completely */
+	down_write(&brw->rw_sem);
+
+	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
+	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
+
+	/* wait for all readers to complete their percpu_up_read() */
+	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
+}
+
+void percpu_up_write(struct percpu_rw_semaphore *brw)
+{
+	/* release the lock, but the readers can't use the fast-path */
+	up_write(&brw->rw_sem);
+	/*
+	 * Insert the barrier before the next fast-path in down_read,
+	 * see W_R case in the comment above update_fast_ctr().
+	 */
+	synchronize_sched_expedited();
+	/* the last writer unblocks update_fast_ctr() */
+	atomic_dec(&brw->write_ctr);
+}
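For context, a minimal usage sketch of the percpu_rw_semaphore implemented above. The entry points and the percpu_init_rwsem() wrapper are declared in include/linux/percpu-rwsem.h as of this series; the subsystem, variable and function names below are invented for illustration:

#include <linux/percpu-rwsem.h>

/* Read-mostly data: readers take the cheap percpu fast path, the rare
 * writer pays for synchronize_sched_expedited() and for draining readers. */
static struct percpu_rw_semaphore config_rwsem;
static int config_value;

static int config_setup(void)
{
	/* allocates the per-cpu fast counter, may fail with -ENOMEM */
	return percpu_init_rwsem(&config_rwsem);
}

static int config_read(void)
{
	int val;

	percpu_down_read(&config_rwsem);	/* may sleep; not for atomic context */
	val = config_value;
	percpu_up_read(&config_rwsem);

	return val;
}

static void config_update(int new_val)
{
	percpu_down_write(&config_rwsem);	/* blocks new readers, waits for old ones */
	config_value = new_val;
	percpu_up_write(&config_rwsem);
}

static void config_teardown(void)
{
	percpu_free_rwsem(&config_rwsem);
}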
diff --git a/lib/random32.c b/lib/random32.c
index 938bde5876ac..52280d5526be 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -42,13 +42,13 @@
 static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
 
 /**
- * prandom32 - seeded pseudo-random number generator.
+ * prandom_u32_state - seeded pseudo-random number generator.
  * @state: pointer to state structure holding seeded state.
  *
  * This is used for pseudo-randomness with no outside seeding.
- * For more random results, use random32().
+ * For more random results, use prandom_u32().
  */
-u32 prandom32(struct rnd_state *state)
+u32 prandom_u32_state(struct rnd_state *state)
 {
 #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
 
@@ -58,32 +58,81 @@ u32 prandom32(struct rnd_state *state)
 	return (state->s1 ^ state->s2 ^ state->s3);
 }
-EXPORT_SYMBOL(prandom32);
+EXPORT_SYMBOL(prandom_u32_state);
 
 /**
- * random32 - pseudo random number generator
+ * prandom_u32 - pseudo random number generator
  *
  * A 32 bit pseudo-random number is generated using a fast
  * algorithm suitable for simulation. This algorithm is NOT
  * considered safe for cryptographic use.
  */
-u32 random32(void)
+u32 prandom_u32(void)
 {
 	unsigned long r;
 	struct rnd_state *state = &get_cpu_var(net_rand_state);
 
-	r = prandom32(state);
+	r = prandom_u32_state(state);
 	put_cpu_var(state);
 	return r;
 }
-EXPORT_SYMBOL(random32);
+EXPORT_SYMBOL(prandom_u32);
+
+/*
+ *	prandom_bytes_state - get the requested number of pseudo-random bytes
+ *
+ *	@state: pointer to state structure holding seeded state.
+ *	@buf: where to copy the pseudo-random bytes to
+ *	@bytes: the requested number of bytes
+ *
+ *	This is used for pseudo-randomness with no outside seeding.
+ *	For more random results, use prandom_bytes().
+ */
+void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
+{
+	unsigned char *p = buf;
+	int i;
+
+	for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
+		u32 random = prandom_u32_state(state);
+		int j;
+
+		for (j = 0; j < sizeof(u32); j++) {
+			p[i + j] = random;
+			random >>= BITS_PER_BYTE;
+		}
+	}
+	if (i < bytes) {
+		u32 random = prandom_u32_state(state);
+
+		for (; i < bytes; i++) {
+			p[i] = random;
+			random >>= BITS_PER_BYTE;
+		}
+	}
+}
+EXPORT_SYMBOL(prandom_bytes_state);
+
+/**
+ *	prandom_bytes - get the requested number of pseudo-random bytes
+ *	@buf: where to copy the pseudo-random bytes to
+ *	@bytes: the requested number of bytes
+ */
+void prandom_bytes(void *buf, int bytes)
+{
+	struct rnd_state *state = &get_cpu_var(net_rand_state);
+
+	prandom_bytes_state(state, buf, bytes);
+	put_cpu_var(state);
+}
+EXPORT_SYMBOL(prandom_bytes);
 
 /**
- * srandom32 - add entropy to pseudo random number generator
+ * prandom_seed - add entropy to pseudo random number generator
  * @seed: seed value
  *
- * Add some additional seeding to the random32() pool.
+ * Add some additional seeding to the prandom pool.
  */
-void srandom32(u32 entropy)
+void prandom_seed(u32 entropy)
 {
 	int i;
 	/*
@@ -95,13 +144,13 @@ void srandom32(u32 entropy)
 			state->s1 = __seed(state->s1 ^ entropy, 1);
 	}
 }
-EXPORT_SYMBOL(srandom32);
+EXPORT_SYMBOL(prandom_seed);
 
 /*
  *	Generate some initially weak seeding values to allow
- *	to start the random32() engine.
+ *	to start the prandom_u32() engine.
  */
-static int __init random32_init(void)
+static int __init prandom_init(void)
 {
 	int i;
@@ -114,22 +163,22 @@ static int __init random32_init(void)
 		state->s3 = __seed(LCG(state->s2), 15);
 
 		/* "warm it up" */
-		prandom32(state);
-		prandom32(state);
-		prandom32(state);
-		prandom32(state);
-		prandom32(state);
-		prandom32(state);
+		prandom_u32_state(state);
+		prandom_u32_state(state);
+		prandom_u32_state(state);
+		prandom_u32_state(state);
+		prandom_u32_state(state);
+		prandom_u32_state(state);
 	}
 	return 0;
 }
-core_initcall(random32_init);
+core_initcall(prandom_init);
 
 /*
  * Generate better values after random number generator
 * is fully initialized.
 */
-static int __init random32_reseed(void)
+static int __init prandom_reseed(void)
 {
 	int i;
 
@@ -143,8 +192,8 @@ static int __init random32_reseed(void)
 		state->s3 = __seed(seeds[2], 15);
 
 		/* mix it in */
-		prandom32(state);
+		prandom_u32_state(state);
 	}
 	return 0;
 }
-late_initcall(random32_reseed);
+late_initcall(prandom_reseed);
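A short sketch of how the renamed prandom_*() interfaces are used: a privately seeded rnd_state gives reproducible pseudo-random data (the pattern the rbtree and interval tree self-tests follow), while prandom_u32() draws from the global per-cpu state. Function names and the seed value are illustrative only:

#include <linux/random.h>

/* Reproducible test data: the same seed always produces the same bytes. */
static void fill_test_pattern(void *buf, int len)
{
	struct rnd_state rnd;

	prandom_seed_state(&rnd, 42ULL);
	prandom_bytes_state(&rnd, buf, len);
}

/* One-off, non-reproducible value from the per-cpu generator; still not
 * suitable for anything cryptographic. */
static u32 pick_index(u32 limit)
{
	return prandom_u32() % limit;
}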
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 268b23951fec..af38aedbd874 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -96,8 +96,8 @@ static void init(void)
 {
 	int i;
 	for (i = 0; i < NODES; i++) {
-		nodes[i].key = prandom32(&rnd);
-		nodes[i].val = prandom32(&rnd);
+		nodes[i].key = prandom_u32_state(&rnd);
+		nodes[i].val = prandom_u32_state(&rnd);
 	}
 }
 
@@ -118,7 +118,7 @@ static void check(int nr_nodes)
 {
 	struct rb_node *rb;
 	int count = 0;
-	int blacks;
+	int blacks = 0;
 	u32 prev_key = 0;
 
 	for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
@@ -155,7 +155,7 @@ static int rbtree_test_init(void)
 
 	printk(KERN_ALERT "rbtree testing");
 
-	prandom32_seed(&rnd, 3141592653589793238ULL);
+	prandom_seed_state(&rnd, 3141592653589793238ULL);
 	init();
 
 	time1 = get_cycles();
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 3675452b23ca..7874b01e816e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,7 +248,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
 	unsigned int left;
 
 #ifndef ARCH_HAS_SG_CHAIN
-	BUG_ON(nents > max_ents);
+	if (WARN_ON_ONCE(nents > max_ents))
+		return -EINVAL;
 #endif
 
 	memset(table, 0, sizeof(*table));
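With the BUG_ON() replaced by WARN_ON_ONCE() plus -EINVAL, an oversized nents now comes back to the caller as an ordinary error instead of halting the machine, so the existing error path of any sg_alloc_table() caller already covers it. A hypothetical caller sketch (names invented):

#include <linux/scatterlist.h>
#include <linux/gfp.h>

static int map_pages(struct sg_table *sgt, struct page **pages,
		     unsigned int n_pages)
{
	int ret;

	ret = sg_alloc_table(sgt, n_pages, GFP_KERNEL);
	if (ret)	/* -ENOMEM, or now -EINVAL for an impossible nents */
		return ret;

	/* ... sg_set_page() each entry and hand sgt to the driver ... */

	return 0;
}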
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 39c99fea7c03..fab33a9c5318 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -23,12 +23,12 @@
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 #include <linux/kallsyms.h>
+#include <linux/math64.h>
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
 #include <net/addrconf.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
-#include <asm/div64.h>
 #include <asm/sections.h>	/* for dereference_function_descriptor() */
 
 #include "kstrtox.h"
@@ -38,6 +38,8 @@
  * @cp: The start of the string
  * @endp: A pointer to the end of the parsed string will be placed here
  * @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtoull instead.
  */
 unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
 {
@@ -61,6 +63,8 @@ EXPORT_SYMBOL(simple_strtoull);
  * @cp: The start of the string
  * @endp: A pointer to the end of the parsed string will be placed here
  * @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtoul instead.
  */
 unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
 {
@@ -73,6 +77,8 @@ EXPORT_SYMBOL(simple_strtoul);
  * @cp: The start of the string
  * @endp: A pointer to the end of the parsed string will be placed here
  * @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtol instead.
 */
 long simple_strtol(const char *cp, char **endp, unsigned int base)
 {
@@ -88,6 +94,8 @@ EXPORT_SYMBOL(simple_strtol);
  * @cp: The start of the string
  * @endp: A pointer to the end of the parsed string will be placed here
  * @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtoll instead.
 */
 long long simple_strtoll(const char *cp, char **endp, unsigned int base)
 {
@@ -1485,7 +1493,10 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 			num = va_arg(args, long);
 			break;
 		case FORMAT_TYPE_SIZE_T:
-			num = va_arg(args, size_t);
+			if (spec.flags & SIGN)
+				num = va_arg(args, ssize_t);
+			else
+				num = va_arg(args, size_t);
 			break;
 		case FORMAT_TYPE_PTRDIFF:
 			num = va_arg(args, ptrdiff_t);
@@ -2013,7 +2024,11 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
 	char digit;
 	int num = 0;
 	u8 qualifier;
-	u8 base;
+	unsigned int base;
+	union {
+		long long s;
+		unsigned long long u;
+	} val;
 	s16 field_width;
 	bool is_sign;
 
@@ -2053,8 +2068,11 @@
 
 		/* get field width */
 		field_width = -1;
-		if (isdigit(*fmt))
+		if (isdigit(*fmt)) {
 			field_width = skip_atoi(&fmt);
+			if (field_width <= 0)
+				break;
+		}
 
 		/* get conversion qualifier */
 		qualifier = -1;
@@ -2154,58 +2172,61 @@
 		    || (base == 0 && !isdigit(digit)))
 			break;
 
+		if (is_sign)
+			val.s = qualifier != 'L' ?
+				simple_strtol(str, &next, base) :
+				simple_strtoll(str, &next, base);
+		else
+			val.u = qualifier != 'L' ?
+				simple_strtoul(str, &next, base) :
+				simple_strtoull(str, &next, base);
+
+		if (field_width > 0 && next - str > field_width) {
+			if (base == 0)
+				_parse_integer_fixup_radix(str, &base);
+			while (next - str > field_width) {
+				if (is_sign)
+					val.s = div_s64(val.s, base);
+				else
+					val.u = div_u64(val.u, base);
+				--next;
+			}
+		}
+
 		switch (qualifier) {
 		case 'H':	/* that's 'hh' in format */
-			if (is_sign) {
-				signed char *s = (signed char *)va_arg(args, signed char *);
-				*s = (signed char)simple_strtol(str, &next, base);
-			} else {
-				unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
-				*s = (unsigned char)simple_strtoul(str, &next, base);
-			}
+			if (is_sign)
+				*va_arg(args, signed char *) = val.s;
+			else
+				*va_arg(args, unsigned char *) = val.u;
 			break;
 		case 'h':
-			if (is_sign) {
-				short *s = (short *)va_arg(args, short *);
-				*s = (short)simple_strtol(str, &next, base);
-			} else {
-				unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
-				*s = (unsigned short)simple_strtoul(str, &next, base);
-			}
+			if (is_sign)
+				*va_arg(args, short *) = val.s;
+			else
+				*va_arg(args, unsigned short *) = val.u;
 			break;
 		case 'l':
-			if (is_sign) {
-				long *l = (long *)va_arg(args, long *);
-				*l = simple_strtol(str, &next, base);
-			} else {
-				unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
-				*l = simple_strtoul(str, &next, base);
-			}
+			if (is_sign)
+				*va_arg(args, long *) = val.s;
+			else
+				*va_arg(args, unsigned long *) = val.u;
 			break;
 		case 'L':
-			if (is_sign) {
-				long long *l = (long long *)va_arg(args, long long *);
-				*l = simple_strtoll(str, &next, base);
-			} else {
-				unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
-				*l = simple_strtoull(str, &next, base);
-			}
+			if (is_sign)
+				*va_arg(args, long long *) = val.s;
+			else
+				*va_arg(args, unsigned long long *) = val.u;
 			break;
 		case 'Z':
 		case 'z':
-		{
-			size_t *s = (size_t *)va_arg(args, size_t *);
-			*s = (size_t)simple_strtoul(str, &next, base);
-		}
-		break;
+			*va_arg(args, size_t *) = val.u;
+			break;
 		default:
-			if (is_sign) {
-				int *i = (int *)va_arg(args, int *);
-				*i = (int)simple_strtol(str, &next, base);
-			} else {
-				unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
-				*i = (unsigned int)simple_strtoul(str, &next, base);
-			}
+			if (is_sign)
+				*va_arg(args, int *) = val.s;
+			else
+				*va_arg(args, unsigned int *) = val.u;
 			break;
 		}
 		num++;
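To make the two user-visible changes above concrete: %zd now fetches a signed ssize_t in vsnprintf() (previously the value was read as an unsigned size_t), and vsscanf() now honours an explicit field width by dividing the parsed value back down. A small hedged sketch; the function name and values are invented:

#include <linux/kernel.h>

static void format_and_parse_demo(void)
{
	char buf[32];
	ssize_t err = -5;
	int hh, mm;

	/* prints "copied -5 bytes"; before the change a negative ssize_t
	 * could come out as a huge positive number on 32-bit */
	snprintf(buf, sizeof(buf), "copied %zd bytes", err);

	/* field width is now respected: "%2d%2d" on "1230" yields 12 and 30 */
	if (sscanf("1230", "%2d%2d", &hh, &mm) == 2)
		pr_info("hh=%d mm=%d\n", hh, mm);
}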