author     Linus Torvalds <torvalds@linux-foundation.org>  2012-04-06 22:54:56 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-04-06 22:54:56 +0200
commit     f68e556e23d1a4176b563bcb25d8baf2c5313f91
tree       4c43c375dd0c608ed506953d80ebfedacca37161
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Make the "word-at-a-time" helper functions more commonly usable
I have a new optimized x86 "strncpy_from_user()" that will use these
same helper functions for all the same reasons the name lookup code uses
them. This is preparation for that.
This moves them into an architecture-specific header file. It's
architecture-specific for two reasons:
- some of the functions are likely to want architecture-specific
implementations. Even if the current code happens to be "generic" in
the sense that it should work on any little-endian machine, it's
likely that the "multiply by a big constant and shift" implementation
is less than optimal for an architecture that has a guaranteed fast
bit count instruction, for example (a sketch of such an alternative follows below).
- I expect that if architectures like sparc want to start playing
around with this, we'll need to abstract out a few more details (in
particular the actual unaligned accesses). So we're likely to have
more architecture-specific stuff if non-x86 architectures start using
this.
(and if it turns out that non-x86 architectures don't start using
this, then having it in an architecture-specific header is still the
right thing to do, of course)
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
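
As an illustration of the "fast bit count" point above, here is a sketch of what an architecture-specific count_masked_bytes() could look like. This is not part of the commit: the function name count_masked_bytes_clz and the use of GCC/Clang's __builtin_clzl() are assumptions, and it presumes a 64-bit unsigned long plus the mask shape hash_name() passes in (all-ones in every byte below the first NUL/'/' byte).

/*
 * Hypothetical alternative, not from this patch: the incoming mask is
 * all-ones in every byte below the first special byte (for example
 * 0x0000000000ffffff -> 3), so the byte count is the index of the
 * highest set bit rounded up to whole bytes.  A machine with a fast
 * count-leading-zeros instruction can compute that directly instead
 * of using the multiply-and-shift.
 */
static inline long count_masked_bytes_clz(unsigned long mask)
{
	if (!mask)			/* __builtin_clzl(0) is undefined */
		return 0;
	return (64 - __builtin_clzl(mask)) >> 3;
}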
-rw-r--r--  arch/x86/include/asm/word-at-a-time.h  46
-rw-r--r--  fs/namei.c                              35
2 files changed, 49 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6fe6767b7124
--- /dev/null
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+	return mask*0x0001020304050608ul >> 56;
+}
+
+#else	/* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	long a = (0x0ff0001+mask) >> 23;
+	/* Fix the 1 for 00 case */
+	return a & mask;
+}
+
+#endif
+
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
+/* Return the high bit set in the first byte that is a zero */
+static inline unsigned long has_zero(unsigned long a)
+{
+	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/fs/namei.c b/fs/namei.c
index 1898198abc3d..0062dd17eb55 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1407,18 +1407,9 @@ static inline int can_lookup(struct inode *inode)
  */
 #ifdef CONFIG_DCACHE_WORD_ACCESS
 
-#ifdef CONFIG_64BIT
+#include <asm/word-at-a-time.h>
 
-/*
- * Jan Achrenius on G+: microoptimized version of
- * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
- * that works for the bytemasks without having to
- * mask them first.
- */
-static inline long count_masked_bytes(unsigned long mask)
-{
-	return mask*0x0001020304050608ul >> 56;
-}
+#ifdef CONFIG_64BIT
 
 static inline unsigned int fold_hash(unsigned long hash)
 {
@@ -1428,15 +1419,6 @@ static inline unsigned int fold_hash(unsigned long hash)
 
 #else	/* 32-bit case */
 
-/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
-static inline long count_masked_bytes(long mask)
-{
-	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
-	long a = (0x0ff0001+mask) >> 23;
-	/* Fix the 1 for 00 case */
-	return a & mask;
-}
-
 #define fold_hash(x) (x)
 
 #endif
@@ -1464,17 +1446,6 @@ done:
 }
 EXPORT_SYMBOL(full_name_hash);
 
-#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
-#define ONEBYTES	REPEAT_BYTE(0x01)
-#define SLASHBYTES	REPEAT_BYTE('/')
-#define HIGHBITS	REPEAT_BYTE(0x80)
-
-/* Return the high bit set in the first byte that is a zero */
-static inline unsigned long has_zero(unsigned long a)
-{
-	return ((a - ONEBYTES) & ~a) & HIGHBITS;
-}
-
 /*
  * Calculate the length and hash of the path component, and
  * return the length of the component;
@@ -1490,7 +1461,7 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
 		len += sizeof(unsigned long);
 		a = *(unsigned long *)(name+len);
 		/* Do we have any NUL or '/' bytes in this word? */
-		mask = has_zero(a) | has_zero(a ^ SLASHBYTES);
+		mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
 	} while (!mask);
 	/* The mask *below* the first high bit set */
 	mask = (mask - 1) & ~mask;
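
To see how the relocated helpers fit together, here is a standalone little-endian sketch of the 64-bit zero-byte search, including the same mask post-processing that hash_name() performs. This is illustration only, not kernel code: main(), the test word, and the printf output are invented for the example, and a 64-bit unsigned long is assumed.

#include <stdio.h>

#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))	/* 0x0101...01 * x on 64-bit */

/* Return the high bit set in the first byte that is a zero (from the patch) */
static inline unsigned long has_zero(unsigned long a)
{
	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
}

/* 64-bit version from the patch: all-ones-below mask -> byte count */
static inline long count_masked_bytes(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;
}

int main(void)
{
	/* "abc\0...." loaded as a little-endian word: byte 3 is the NUL */
	unsigned long a = 0x2e2e2e2e00636261ul;
	unsigned long mask = has_zero(a);

	/*
	 * Same post-processing as hash_name(): keep only the bits below
	 * the first high bit, then shift so the low bytes are all-ones.
	 */
	mask = (mask - 1) & ~mask;
	mask >>= 7;

	printf("first zero byte at index %ld\n", count_masked_bytes(mask));
	return 0;
}

Running this prints "first zero byte at index 3", which is exactly the count hash_name() adds to its running len.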