author     Paul Mundt <lethal@linux-sh.org>  2006-09-27 11:36:17 +0200
committer  Paul Mundt <lethal@linux-sh.org>  2006-09-27 11:36:17 +0200
commit     f3c2575818fab45f8609e4aef2e43ab02b3a142e (patch)
tree       a4924d7dd8f8df229e36fab24ccccfe12437509b
parent     sh: dma-mapping compile fixes. (diff)
sh: Calculate shm alignment at runtime.
Set the SHM alignment at runtime, based on the probed cache descriptor.
Optimize get_unmapped_area() to only colour-align shared mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
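For context, a minimal user-space sketch of the alignment arithmetic this patch introduces. The 16KB way size below is an illustrative assumption standing in for the value probed from cpu_data->dcache at boot; it is not real hardware data.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/*
 * Stand-in for the boot-time probe in sh_cpu_init(): an SH-4 style
 * 32KB, 2-way D-cache has a 16KB way size, giving four page colours.
 * (Assumed value, for illustration only.)
 */
static unsigned long dcache_way_size = 16 * 1024;
static unsigned long shm_align_mask;

/*
 * Same shape as the COLOUR_ALIGN() macro this patch adds to sys_sh.c:
 * round the candidate address up to a colour boundary, then add the
 * colour bits of the file offset so the mapping shares a cache colour
 * with other mappings of the same pages.
 */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + shm_align_mask) & ~shm_align_mask) +
	       ((pgoff << PAGE_SHIFT) & shm_align_mask);
}

int main(void)
{
	/* max(way_size - 1, PAGE_SIZE - 1), as in sh_cpu_init(). */
	shm_align_mask = dcache_way_size - 1;
	if (shm_align_mask < PAGE_SIZE - 1)
		shm_align_mask = PAGE_SIZE - 1;

	/* A shared mapping of file page 3 is bumped to the matching colour. */
	printf("mask=%#lx  aligned=%#lx\n",
	       shm_align_mask, colour_align(0x10001000UL, 3UL));
	return 0;
}

Note that the patch sets do_colour_align for any file-backed or MAP_SHARED mapping, since either can alias a physical page already present in the virtually-indexed cache; purely anonymous private mappings only need PAGE_ALIGN().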
-rw-r--r--  arch/sh/kernel/cpu/init.c              5
-rw-r--r--  arch/sh/kernel/sys_sh.c               54
-rw-r--r--  include/asm-sh/cacheflush.h            2
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h    8
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h    3
-rw-r--r--  include/asm-sh/page.h                  2
-rw-r--r--  include/asm-sh/pgtable.h               2
7 files changed, 43 insertions, 33 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 731dd61419dd..bfb90eb0b7a6 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
@@ -198,6 +199,10 @@ asmlinkage void __init sh_cpu_init(void)
/* Init the cache */
cache_init();
+ shm_align_mask = max_t(unsigned long,
+ cpu_data->dcache.way_size - 1,
+ PAGE_SIZE - 1);
+
/* Disable the FPU */
if (fpu_disabled) {
printk("FPU Disabled\n");
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 0ee7bf4cb238..b68ff705f067 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,6 +21,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
+#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
@@ -44,11 +45,16 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
return error;
}
-#if defined(HAVE_ARCH_UNMAPPED_AREA) && defined(CONFIG_MMU)
+unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
+
+EXPORT_SYMBOL(shm_align_mask);
+
/*
- * To avoid cache alias, we map the shard page with same color.
+ * To avoid cache aliases, we map the shared page with same color.
*/
-#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + shm_align_mask) & ~shm_align_mask) + \
+ (((pgoff) << PAGE_SHIFT) & shm_align_mask))
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -56,43 +62,52 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
+ int do_colour_align;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
return -EINVAL;
return addr;
}
- if (len > TASK_SIZE)
+ if (unlikely(len > TASK_SIZE))
return -ENOMEM;
+ do_colour_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
if (addr) {
- if (flags & MAP_PRIVATE)
- addr = PAGE_ALIGN(addr);
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
else
- addr = COLOUR_ALIGN(addr);
+ addr = PAGE_ALIGN(addr);
+
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
- if (len <= mm->cached_hole_size) {
+
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
mm->cached_hole_size = 0;
- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ start_addr = addr = TASK_UNMAPPED_BASE;
}
- if (flags & MAP_PRIVATE)
- addr = PAGE_ALIGN(mm->free_area_cache);
- else
- addr = COLOUR_ALIGN(mm->free_area_cache);
- start_addr = addr;
full_search:
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(mm->free_area_cache);
+
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr) {
+ if (unlikely(TASK_SIZE - len < addr)) {
/*
* Start a new search - just in case we missed
* some holes.
@@ -104,7 +119,7 @@ full_search:
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (likely(!vma || addr + len <= vma->vm_start)) {
/*
* Remember the place where we stopped the search:
*/
@@ -115,11 +130,10 @@ full_search:
mm->cached_hole_size = vma->vm_start - addr;
addr = vma->vm_end;
- if (!(flags & MAP_PRIVATE))
- addr = COLOUR_ALIGN(addr);
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
}
}
-#endif
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 92930b4a40d4..07f62ec9ff0c 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -28,5 +28,7 @@ extern void __flush_invalidate_region(void *start, int size);
memcpy(dst, src, len); \
} while (0)
+#define HAVE_ARCH_UNMAPPED_AREA
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index 97f5a64c2ab8..03fde97a7fd0 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -64,12 +64,4 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define p3_cache_init() do { } while (0)
-/*
- * We provide our own get_unmapped_area to avoid cache aliasing issues
- * on SH7705 with a 32KB cache, and to page align addresses in the
- * non-aliasing case.
- */
-#define HAVE_ARCH_UNMAPPED_AREA
-
#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
-
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index a95fc951aff6..515fd574267c 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -39,9 +39,6 @@ void p3_cache_init(void);
#define PG_mapped PG_arch_1
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
#ifdef CONFIG_MMU
extern int remap_area_pages(unsigned long addr, unsigned long phys_addr,
unsigned long size, unsigned long flags);
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 3d8dae31a6f6..ca8b26d90475 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -44,6 +44,8 @@
extern void (*clear_page)(void *to);
extern void (*copy_page)(void *to, void *from);
+extern unsigned long shm_align_mask;
+
#ifdef CONFIG_MMU
extern void clear_page_slow(void *to);
extern void copy_page_slow(void *to, void *from);
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 41c559d8ba87..2c8682ad1012 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -340,6 +340,4 @@ extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
-
#endif /* __ASM_SH_PAGE_H */
-