author | Oskar Schirmer <os@emlix.com> | 2009-06-10 21:58:45 +0200
---|---|---
committer | Chris Zankel <chris@zankel.net> | 2009-06-22 11:36:50 +0200
commit | bd974240c9a7c6c560504bf390cd8985a16b68f6 (patch) |
tree | fdece95f79c93bfda475c5734da3e247d5f93d99 /arch/xtensa/include/asm/cacheflush.h |
parent | Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drz... (diff) |
xtensa: cache inquiry and unaligned cache handling functions
The existing xtensa cache handling functions only work on page-aligned
memory regions.
Add cache inquiry and unaligned cache handling functions for the s6000
dma engine, which can operate at byte granularity.
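As a usage illustration (not part of the patch), a driver's transfer setup might combine the inquiry helpers with the unaligned flush/invalidate roughly as in the sketch below; dma_copy() and dma_start() are hypothetical names standing in for the real s6000 code:

```c
#include <linux/types.h>
#include <asm/cacheflush.h>	/* helpers added by this patch */

/* Hypothetical stand-in for programming the s6000 engine and starting
 * the transfer; illustrative only. */
static void dma_start(u32 dst, u32 src, u32 size)
{
	/* ... write descriptor registers, kick off the transfer ... */
}

static void dma_copy(u32 dst, u32 src, u32 size)
{
	/* Write dirty source lines back so the engine reads current
	 * data, but only if the source mapping is actually cached. */
	if (xtensa_need_flush_dma_source(src))
		flush_dcache_unaligned(src, size);

	/* Drop stale destination lines so the CPU re-reads memory after
	 * the transfer; the helper writes back the partially covered
	 * first and last lines before invalidating. */
	if (xtensa_need_invalidate_dma_destination(dst))
		invalidate_dcache_unaligned(dst, size);

	dma_start(dst, src, size);
}
```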
Signed-off-by: Oskar Schirmer <os@emlix.com>
Cc: Johannes Weiner <jw@emlix.com>
Cc: Daniel Glöckner <dg@emlix.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'arch/xtensa/include/asm/cacheflush.h')
-rw-r--r-- | arch/xtensa/include/asm/cacheflush.h | 95
1 file changed, 95 insertions, 0 deletions
```diff
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 8fc1c0c8de07..b7b8fbe47c77 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -155,5 +155,100 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 
 #endif
 
+#define XTENSA_CACHEBLK_LOG2	29
+#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
+#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)
+
+#if XCHAL_HAVE_CACHEATTR
+static inline u32 xtensa_get_cacheattr(void)
+{
+	u32 r;
+	asm volatile("	rsr %0, CACHEATTR" : "=a"(r));
+	return r;
+}
+
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+	u32 r = addr & XTENSA_CACHEBLK_MASK;
+	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
+			& 0xF);
+}
+#else
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+	u32 r;
+	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
+	asm volatile("	dsync");
+	return r;
+}
+
+static inline u32 xtensa_get_cacheattr(void)
+{
+	u32 r = 0;
+	u32 a = 0;
+	do {
+		a -= XTENSA_CACHEBLK_SIZE;
+		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
+	} while (a);
+	return r;
+}
+#endif
+
+static inline int xtensa_need_flush_dma_source(u32 addr)
+{
+	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
+}
+
+static inline int xtensa_need_invalidate_dma_destination(u32 addr)
+{
+	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
+}
+
+static inline void flush_dcache_unaligned(u32 addr, u32 size)
+{
+	u32 cnt;
+	if (size) {
+		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+		while (cnt--) {
+			asm volatile("	dhwb %0, 0" : : "a"(addr));
+			addr += XCHAL_DCACHE_LINESIZE;
+		}
+		asm volatile("	dsync");
+	}
+}
+
+static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+	int cnt;
+	if (size) {
+		asm volatile("	dhwbi %0, 0 ;" : : "a"(addr));
+		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+		while (cnt-- > 0) {
+			asm volatile("	dhi %0, %1" : : "a"(addr),
+					"n"(XCHAL_DCACHE_LINESIZE));
+			addr += XCHAL_DCACHE_LINESIZE;
+		}
+		asm volatile("	dhwbi %0, %1" : : "a"(addr),
+				"n"(XCHAL_DCACHE_LINESIZE));
+		asm volatile("	dsync");
+	}
+}
+
+static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+	u32 cnt;
+	if (size) {
+		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+		while (cnt--) {
+			asm volatile("	dhwbi %0, 0" : : "a"(addr));
+			addr += XCHAL_DCACHE_LINESIZE;
+		}
+		asm volatile("	dsync");
+	}
+}
+
 #endif /* __KERNEL__ */
 #endif /* _XTENSA_CACHEFLUSH_H */
```
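A brief aside on the arithmetic in the new helpers: with XTENSA_CACHEBLK_LOG2 = 29, the 4 GB address space splits into eight 512 MB blocks, each described by one 4-bit field of CACHEATTR, so `r >> (XTENSA_CACHEBLK_LOG2 - 2)` converts a block base address into the bit position of its field (block index × 4). The standalone sketch below replays that lookup and the cache-line count used by flush_dcache_unaligned(); the CACHEATTR value and the 16-byte line size are made-up assumptions for illustration (XCHAL_DCACHE_LINESIZE is configuration-defined):

```c
#include <stdint.h>
#include <stdio.h>

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_MASK	(7u << XTENSA_CACHEBLK_LOG2)

int main(void)
{
	uint32_t cacheattr = 0x22222244;	/* made-up CACHEATTR value */
	uint32_t addr = 0xD0000003;		/* lies in 512 MB block 6 */

	/* Attribute lookup as done by xtensa_get_dtlb1() on
	 * XCHAL_HAVE_CACHEATTR configurations. */
	uint32_t base  = addr & XTENSA_CACHEBLK_MASK;		/* 0xC0000000 */
	uint32_t shift = base >> (XTENSA_CACHEBLK_LOG2 - 2);	/* 6 * 4 = 24 */
	uint32_t attr  = (cacheattr >> shift) & 0xF;		/* 0x2 here */
	printf("dtlb1 result: 0x%08x (attr %u)\n", base | attr, attr);

	/* Line count as computed by flush_dcache_unaligned(): the number
	 * of cache lines overlapping [addr, addr + size), assuming a
	 * 16-byte line. */
	uint32_t size = 0x20, line = 16;
	uint32_t cnt = (size + ((line - 1) & addr) + line - 1) / line;
	printf("lines to flush: %u\n", cnt);	/* 3: 0x...00, 0x...10, 0x...20 */
	return 0;
}
```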