author     Catalin Marinas <catalin.marinas@arm.com>  2008-11-06 14:23:08 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>  2008-11-06 14:23:08 +0100
commit     376e14218d3d791127e9b9bfbe2f99c44c2a19c2 (patch)
tree       227ff719a9b4092a610c51f15a00a599ed072cc5 /arch
parent     ARMv7: Branch over conditional undefined instructions in vfphw.S (diff)
Do not flush the cache in flush_cache_v(un)map for VIPT caches
In the case of non-aliasing VIPT caches, there is no need to flush the whole cache when a new mapping is created. The patch introduces this condition check.

In the non-aliasing VIPT case, flush_cache_vmap() still needs a DSB, since set_pte_at(), called from vmap_pte_range(), does not issue such a barrier itself (it is usually done via the TLB flushing functions).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
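For context (not part of the commit), a rough sketch of the helpers the change relies on; the real definitions live in asm/cachetype.h and asm/system.h, and the details below are approximations:

/*
 * Rough sketch, not the actual kernel source: asm/cachetype.h
 * classifies the cache at boot and exposes predicates such as
 * cache_is_vipt_nonaliasing(); dsb() is the ARMv7 Data
 * Synchronisation Barrier instruction.
 */
#define CACHEID_VIVT			(1 << 0)
#define CACHEID_VIPT_NONALIASING	(1 << 1)
#define CACHEID_VIPT_ALIASING	(1 << 2)

extern unsigned int cacheid;	/* detected during boot */

#define cache_is_vipt_nonaliasing()	(cacheid & CACHEID_VIPT_NONALIASING)

/* Drain outstanding writes so they are observable through the new
 * mapping before it is used. */
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")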
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 36
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index de6c59f814a1..85a2514cbffc 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
#include <asm/glue.h>
#include <asm/shmparam.h>
+#include <asm/cachetype.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -296,16 +297,6 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
#endif
/*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages. Since the
- * direct-mappings of these pages may contain cached data, we need
- * to do a full cache flush to ensure that writebacks don't corrupt
- * data placed into these pages via the new mappings.
- */
-#define flush_cache_vmap(start, end) flush_cache_all()
-#define flush_cache_vunmap(start, end) flush_cache_all()
-
-/*
* Copy user data from/to a page which is mapped into a different
* process's address space. Really, we want to allow our "user
* space" model to handle this.
@@ -444,4 +435,29 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
dmac_inv_range(start, start + size);
}
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (!cache_is_vipt_nonaliasing())
+ flush_cache_all();
+ else
+ /*
+ * set_pte_at() called from vmap_pte_range() does not
+ * have a DSB after cleaning the cache line.
+ */
+ dsb();
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ if (!cache_is_vipt_nonaliasing())
+ flush_cache_all();
+}
+
#endif
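
As a hypothetical usage illustration (the helper name below is made up for the example), the path that benefits is the ordinary vmap() API:

/*
 * Hypothetical illustration, not part of the patch: vmap() installs
 * the PTEs via vmap_pte_range()/set_pte_at() and then calls
 * flush_cache_vmap() over the new range. With this change, on a
 * non-aliasing VIPT cache that call reduces to a single dsb()
 * instead of a full flush_cache_all().
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_pages_example(struct page **pages, unsigned int count)
{
	/* VM_MAP + PAGE_KERNEL: a normal cacheable kernel mapping */
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}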