author		Ralf Baechle <ralf@linux-mips.org>	2008-02-16 23:34:25 +0100
committer	Ralf Baechle <ralf@linux-mips.org>	2008-02-19 18:01:31 +0100
commit		9a74b3eb22f2d67a5681301f52aca5b7703382c8 (patch)
tree		3ef7b8713edfccc96ad1ce57a431828656d92d82 /arch
parent		[MIPS] Fix broken rm7000/rm9000 interrupt handling (diff)
[MIPS] Fix buggy invocations of kmap_coherent()
kmap_coherent will only work correctly if the page it is called on is not
marked dirty.  If it's dirty the kernel address of the page should be used
instead of a temporary mapping.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
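As an aside for readers of the patch, the rule stated above boils down to one guard in front of every kmap_coherent() use. Below is a minimal sketch of that pattern, assuming the MIPS mm context of the files touched in this commit; the helper name flush_aliased_page() is hypothetical and exists only to show the guard in one place, while the actual patch open-codes the test at each call site.

/*
 * Sketch only: a hypothetical helper illustrating the guard this patch
 * adds around kmap_coherent().  The real change open-codes the test in
 * arch/mips/mm/cache.c and arch/mips/mm/init.c.
 */
static void flush_aliased_page(struct page *page, unsigned long vmaddr)
{
	if (page_mapped(page) && !Page_dcache_dirty(page)) {
		/* Clean, mapped page: a temporary mapping at the user's
		 * cache colour is coherent, so flush through it. */
		void *kaddr = kmap_coherent(page, vmaddr);

		flush_data_cache_page((unsigned long)kaddr);
		kunmap_coherent();
	} else {
		/* Dirty (or unmapped) page: the dirty cache lines live at
		 * the kernel mapping, so flush the kernel address instead. */
		flush_data_cache_page((unsigned long)page_address(page));
	}
}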
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/mm/cache.c	15
-rw-r--r--	arch/mips/mm/init.c	9
2 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 81f30ac2bff9..6a24651971df 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -92,12 +92,17 @@ EXPORT_SYMBOL(__flush_dcache_page);
 
 void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {
-	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
-		void *kaddr;
+	unsigned long addr = (unsigned long) page_address(page);
 
-		kaddr = kmap_coherent(page, vmaddr);
-		flush_data_cache_page((unsigned long)kaddr);
-		kunmap_coherent();
+	if (pages_do_alias(addr, vmaddr)) {
+		if (page_mapped(page) && !Page_dcache_dirty(page)) {
+			void *kaddr;
+
+			kaddr = kmap_coherent(page, vmaddr);
+			flush_data_cache_page((unsigned long)kaddr);
+			kunmap_coherent();
+		} else
+			flush_data_cache_page(addr);
 	}
 }
 
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 480dec04f552..c7aed133d11d 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -211,7 +211,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to, KM_USER1);
-	if (cpu_has_dc_aliases && page_mapped(from)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
@@ -234,7 +235,8 @@ void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases && page_mapped(page)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent();
@@ -253,7 +255,8 @@ void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases && page_mapped(page)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent();