author    Zachary Amsden <zach@vmware.com>    2006-10-01 08:29:35 +0200
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-10-01 09:39:34 +0200
commit    23002d88be309a7c78db69363c9d933a29a3b0bb (patch)
tree      ecd99ef70c0f38ff81bf2e3c6d7caeed2b88b41e /arch/i386/mm/highmem.c
parent    [PATCH] paravirt: combine flush accessed dirty.patch (diff)
download  linux-23002d88be309a7c78db69363c9d933a29a3b0bb.tar.xz
          linux-23002d88be309a7c78db69363c9d933a29a3b0bb.zip
[PATCH] paravirt: kpte flush
Create a new PTE function which combines clearing a kernel PTE with the subsequent flush, so that the two can be folded into a single hypercall or paravirt-op.

More subtly, reverse the order of the flush for kmap_atomic: instead of flushing when a mapping is established, flush when it is cleared. This eliminates the possibility of leaving stale kmap entries which may still have valid TLB mappings. This is required for direct-mode hypervisors, which need to reprotect all mappings of a given page when changing its type from a normal page to a protected page (such as a page table or descriptor table page). But it also gives nicer semantics on real hardware: extra debug-proofing against the use of stale mappings, and a guarantee that no stale mappings exist when the cacheability attributes of a page change, which could otherwise cause cache conflicts when two different types of mappings exist for the same page.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
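For reference, a minimal sketch of what the native (non-paravirt) definition of kpte_clear_flush() could look like, assuming it simply pairs the existing pte_clear() with a single-address TLB flush; the real definition is added in a header outside this file, so it is not part of the diffstat below. A paravirt backend can then override the pair with one hypercall:

#define kpte_clear_flush(ptep, vaddr)			\
do {							\
	/* clear the kernel PTE ... */			\
	pte_clear(&init_mm, (vaddr), (ptep));		\
	/* ... and flush its TLB entry right away */	\
	__flush_tlb_one(vaddr);				\
} while (0)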
Diffstat (limited to 'arch/i386/mm/highmem.c')
-rw-r--r--  arch/i386/mm/highmem.c | 18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index ba44000b9069..f9f647cdbc7b 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -38,22 +38,19 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
 	if (!pte_none(*(kmap_pte-idx)))
 		BUG();
-#endif
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
+#ifdef CONFIG_DEBUG_HIGHMEM
 	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
 		dec_preempt_count();
 		preempt_check_resched();
@@ -62,14 +59,14 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 
 	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		BUG();
-
+#endif
 	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it. Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
 	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	__flush_tlb_one(vaddr);
-#endif
+	kpte_clear_flush(kmap_pte-idx, vaddr);
 
 	dec_preempt_count();
 	preempt_check_resched();
@@ -88,7 +85,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
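Taken together, the hunks move the per-slot TLB flush from map time to unmap time: kmap_atomic() and kmap_atomic_pfn() no longer flush after set_pte(), and kunmap_atomic() now clears and flushes in one step via kpte_clear_flush(), unconditionally rather than only under CONFIG_DEBUG_HIGHMEM. A sketch of how a caller sees the new semantics; copy_from_page() and its arguments are illustrative, not part of this patch:

/* "page" is any (possibly highmem) struct page; KM_USER0 is one of
 * the per-CPU atomic kmap slots of this era's API. */
static void copy_from_page(struct page *page, char *dst)
{
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, PAGE_SIZE);
	/* kunmap_atomic() now performs kpte_clear_flush(): the slot's PTE
	 * is cleared and its TLB entry flushed here, so the next
	 * kmap_atomic() on this slot cannot inherit a stale translation. */
	kunmap_atomic(src, KM_USER0);
}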