author | Arnd Bergmann <arnd@arndb.de> | 2009-05-14 00:56:36 +0200 |
---|---|---|
committer | Arnd Bergmann <arnd@klappe2.(none)> | 2009-06-11 21:02:50 +0200 |
commit | 5c01b46bb6bb8f2662573c05c87b5d68fa25af89 (patch) | |
tree | 77a85f19546e08ad8dfdb21b380bbdf8fb1c857e /include/asm-generic/cacheflush.h | |
parent | asm-generic: add generic atomic.h and io.h (diff) | |
download | linux-5c01b46bb6bb8f2662573c05c87b5d68fa25af89.tar.xz linux-5c01b46bb6bb8f2662573c05c87b5d68fa25af89.zip | |
asm-generic: add generic NOMMU versions of some headers
Memory management in general is highly architecture specific, but on
NOMMU architectures it is mostly trivial, so just add a default
implementation in asm-generic that applies to all NOMMU architectures.
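As an aside (not part of this patch), a NOMMU port could then reduce its own <asm/cacheflush.h> to a thin wrapper around the generic file. The sketch below uses a made-up architecture directory and guard name purely for illustration:

```c
/*
 * Hypothetical arch/<arch>/include/asm/cacheflush.h for a NOMMU port.
 * The architecture needs no cache maintenance of its own, so every
 * hook becomes one of the no-op macros from the generic header
 * introduced by this patch.
 */
#ifndef _ASM_MYARCH_CACHEFLUSH_H
#define _ASM_MYARCH_CACHEFLUSH_H

#include <asm-generic/cacheflush.h>

#endif /* _ASM_MYARCH_CACHEFLUSH_H */
```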
The two files cache.h and cacheflush.h can also be used by
architectures that have an MMU but never require cache flushing
and do not have cache lines larger than 32 bytes.
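For context, the generic cache.h mentioned above is not part of this diffstat; a minimal sketch of what such a default could look like, assuming the common 32-byte (1 << 5) line size, is:

```c
/*
 * Minimal sketch of a generic <asm/cache.h>, assuming a 32-byte L1
 * cache line as the default.  An architecture with larger cache
 * lines would have to provide its own definitions instead of using
 * the generic file.
 */
#ifndef __ASM_GENERIC_CACHE_H
#define __ASM_GENERIC_CACHE_H

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

#endif /* __ASM_GENERIC_CACHE_H */
```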
Signed-off-by: Remis Lima Baima <remis.developer@googlemail.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'include/asm-generic/cacheflush.h')
-rw-r--r-- | include/asm-generic/cacheflush.h | 30 |
1 file changed, 30 insertions, 0 deletions
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
new file mode 100644
index 000000000000..ba4ec39a1131
--- /dev/null
+++ b/include/asm-generic/cacheflush.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change when
+ * the cache is mapped to physical memory, not virtual memory
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+#endif /* __ASM_CACHEFLUSH_H */
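To see why no-op macros are enough here, consider a hypothetical call site (load_code() and its arguments are invented for illustration): the flush call stays in the portable source, but on an architecture using this header it expands to do { } while (0) and disappears at compile time.

```c
/*
 * Hypothetical caller, for illustration only.  After copying code
 * into an executable buffer, portable kernel code flushes the
 * instruction cache.  With the generic cacheflush.h above,
 * flush_icache_range() is a no-op macro, so nothing is emitted,
 * while architectures that do need maintenance supply a real
 * implementation under the same name.
 */
#include <linux/string.h>
#include <asm/cacheflush.h>

static void load_code(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
```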