author     David Howells <dhowells@redhat.com>   2010-10-27 18:28:43 +0200
committer  David Howells <dhowells@redhat.com>   2010-10-27 18:28:43 +0200
commit     93c10d3d68c469c1addacbc541da5518f1de021d (patch)
tree       0bf4b073fbd5a9362551b39757137cbab2f448a8 /arch/mn10300/include
parent     MN10300: Provide a MN10300_CACHE_ENABLED config option (diff)
MN10300: Reorder asm/cacheflush.h to put primitives first
Reorder asm/cacheflush.h to put arch primitives first, before the main functions, so that the main functions can be inline asm rather than #defines when non-trivial.

Signed-off-by: David Howells <dhowells@redhat.com>
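The point of the reordering is that, once the mn10300_*cache_* primitives are declared at the top of the header, the generic cache-management functions below them can later be implemented as inline code that calls those primitives instead of remaining empty #defines. As an illustrative sketch only (not part of this patch), a conversion of flush_icache_range() in the CONFIG_MN10300_CACHE_ENABLED case could fall back on the whole-icache invalidation primitive that the header already declares:

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	/*
	 * Sketch only: invalidate the entire instruction cache.  A real
	 * conversion would restrict itself to the start..end range once a
	 * ranged invalidation primitive is available.
	 */
	mn10300_icache_inv();
}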
Diffstat (limited to 'arch/mn10300/include')
-rw-r--r--   arch/mn10300/include/asm/cacheflush.h   88
1 file changed, 44 insertions(+), 44 deletions(-)
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index b85be1d2fd32..0b5d00438374 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,49 +17,7 @@
#include <linux/mm.h>
/*
- * virtually-indexed cache management (our cache is physically indexed)
- */
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifdef CONFIG_MN10300_CACHE_ENABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end) do {} while (0)
-#define flush_icache_page(vma, pg) do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
- flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_page(vma, page); \
- } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-/*
- * primitive routines
+ * Primitive routines
*/
#ifdef CONFIG_MN10300_CACHE_ENABLED
extern void mn10300_icache_inv(void);
@@ -106,7 +64,49 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
#endif /* CONFIG_MN10300_CACHE_ENABLED */
/*
- * internal debugging function
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while (0)
+#define flush_cache_mm(mm) do {} while (0)
+#define flush_cache_dup_mm(mm) do {} while (0)
+#define flush_cache_range(mm, start, end) do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
+#define flush_cache_vmap(start, end) do {} while (0)
+#define flush_cache_vunmap(start, end) do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do {} while (0)
+#define flush_dcache_mmap_lock(mapping) do {} while (0)
+#define flush_dcache_mmap_unlock(mapping) do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
+
+#else
+
+#define flush_icache_range(start, end) do {} while (0)
+#define flush_icache_page(vma, pg) do {} while (0)
+
+#endif
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+ flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_page(vma, page); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+/*
+ * Internal debugging function
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);