author     Nicolas Pitre <nico@cam.org>  2009-09-03 22:45:59 +0200
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2009-09-04 20:20:07 +0200
commit     7929eb9cf643ae416e5081b2a6fa558d37b9854c (patch)
tree       c4cbaa5ccdd1c929eba802374d80191b6f90e16c /arch/arm
parent     ARM: 5688/1: ks8695_serial: disable_irq() lockup (diff)
ARM: 5691/1: fix cache aliasing issues between kmap() and kmap_atomic() with highmem
Let's suppose a highmem page is kmap'd with kmap(). A pkmap entry is used, the page mapped to it, and the virtual cache is dirtied. Then kunmap() is used, which does virtually nothing except for decrementing a usage count.

Then, let's suppose the _same_ page gets mapped using kmap_atomic(). It is therefore mapped onto a fixmap entry instead, which has a different virtual address unaware of the dirty cache data for that page sitting in the pkmap mapping.

Fortunately it is easy to know if a pkmap mapping still exists for that page and use it directly with kmap_atomic(), thanks to kmap_high_get().

And actual testing with a printk in the added code path shows that this condition is actually met *extremely* frequently. Seems that we've been quite lucky that things have worked so well with highmem so far.

Cc: stable@kernel.org
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
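
For illustration only, here is a minimal sketch of the problematic sequence the message describes, using the 2.6.31-era two-argument kmap_atomic() API; the function name, the page argument and the KM_USER0 slot are placeholders, not part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical demonstration of the aliasing scenario described above. */
static void demo_cache_alias(struct page *page, const void *src, size_t len)
{
	void *vaddr;

	/* kmap() installs a pkmap mapping; the write dirties the virtual cache. */
	vaddr = kmap(page);
	memcpy(vaddr, src, len);
	kunmap(page);	/* only decrements the use count; the mapping may persist */

	/*
	 * Without the fix, kmap_atomic() maps the same page at a different
	 * fixmap address, unaware of the dirty cache lines still sitting in
	 * the pkmap mapping.  With the fix, kmap_high_get() hands back the
	 * existing pkmap address instead.
	 */
	vaddr = kmap_atomic(page, KM_USER0);
	/* ... read or modify the page ... */
	kunmap_atomic(vaddr, KM_USER0);
}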
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mm/highmem.c  8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index a34954d9df7d..73cae57fa707 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned int idx;
 	unsigned long vaddr;
+	void *kmap;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	kmap = kmap_high_get(page);
+	if (kmap)
+		return kmap;
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #else
 		(void) idx;  /* to kill a warning */
 #endif
+	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+		/* this address was obtained through kmap_high_get() */
+		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
 }