| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-12 08:53:38 +0200 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-12 08:53:38 +0200 |
| commit | bc47ab0241c7c86da4f5e5f82fbca7d45387c18d (patch) | |
| tree | b9c33ae8b6de43e44cc5fcbaa3e4a15f18a5ed42 /arch/sh/include/asm/unaligned-sh4a.h | |
| parent | powerpc: Fix bug in move of altivec code to vector.S (diff) | |
| parent | block: fix kernel-doc in recent block/ changes (diff) | |
| download | linux-bc47ab0241c7c86da4f5e5f82fbca7d45387c18d.tar.xz linux-bc47ab0241c7c86da4f5e5f82fbca7d45387c18d.zip | |
Merge commit 'origin/master' into next
Manual merge of:
arch/powerpc/kernel/asm-offsets.c
Diffstat (limited to 'arch/sh/include/asm/unaligned-sh4a.h')
-rw-r--r-- | arch/sh/include/asm/unaligned-sh4a.h | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index d8f89770275b..9f4dd252c981 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -3,9 +3,9 @@
 /*
  * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 16 and 64-bit accesses are done through shifting and
- * masking relative to the endianness. Unaligned stores are not supported
- * by the instruction encoding, so these continue to use the packed
+ * Support for 64-bit accesses are done through shifting and masking
+ * relative to the endianness. Unaligned stores are not supported by the
+ * instruction encoding, so these continue to use the packed
  * struct.
  *
  * The same note as with the movli.l/movco.l pair applies here, as long
@@ -41,9 +41,9 @@ struct __una_u64 { u64 x __attribute__((packed)); };
 static inline u16 __get_unaligned_cpu16(const u8 *p)
 {
 #ifdef __LITTLE_ENDIAN
-	return __get_unaligned_cpu32(p) & 0xffff;
+	return p[0] | p[1] << 8;
 #else
-	return __get_unaligned_cpu32(p) >> 16;
+	return p[0] << 8 | p[1];
 #endif
 }
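
For context, the hunk above replaces 16-bit helpers that were built on top of __get_unaligned_cpu32() with plain byte-wise loads, while unaligned stores keep going through the packed-struct wrappers. Below is a minimal, standalone userspace sketch of the same two patterns; the helper names, struct name, and test buffer are illustrative, not taken from the kernel header:

```c
#include <stdint.h>
#include <stdio.h>

/* 16-bit unaligned loads composed from individual byte loads, so no
 * unaligned memory access is ever issued (same idea as the patch). */
static inline uint16_t load_unaligned_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);   /* low byte first */
}

static inline uint16_t load_unaligned_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);   /* high byte first */
}

/* Unaligned stores go through a packed struct, mirroring the
 * __una_u16-style wrappers the header keeps using: the packed
 * attribute tells the compiler to emit byte-safe accesses. */
struct una_u16 { uint16_t x; } __attribute__((packed));

static inline void store_unaligned_u16(uint8_t *p, uint16_t val)
{
	((struct una_u16 *)p)->x = val;   /* native byte order of the host */
}

int main(void)
{
	/* The 16-bit value of interest starts at the odd offset 1. */
	uint8_t buf[4] = { 0x00, 0x34, 0x12, 0x00 };

	printf("LE load: 0x%04x\n", (unsigned)load_unaligned_le16(buf + 1)); /* 0x1234 */
	printf("BE load: 0x%04x\n", (unsigned)load_unaligned_be16(buf + 1)); /* 0x3412 */

	store_unaligned_u16(buf + 1, 0xbeef);
	/* Printed byte order depends on the host's endianness. */
	printf("stored bytes: %02x %02x\n", buf[1], buf[2]);
	return 0;
}
```

The byte-wise loads trade one wide access for two byte accesses plus a shift, which is why the original header only used the trick where the hardware genuinely lacks an unaligned load.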