diff options
author | Ingo Molnar <mingo@elte.hu> | 2008-01-30 13:34:09 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 13:34:09 +0100 |
commit | 4c61afcdb2cd4be299c1442b33adf312b695e2d7 (patch) | |
tree | 8f51b96e2f6520c63b7c54dd84f4840ab9157590 | |
parent | x86: optimize clflush (diff) | |
download | linux-4c61afcdb2cd4be299c1442b33adf312b695e2d7.tar.xz linux-4c61afcdb2cd4be299c1442b33adf312b695e2d7.zip |
x86: fix clflush_page_range logic
Only present PTEs must be flushed.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/mm/pageattr.c | 31 | ||||
-rw-r--r-- | include/asm-x86/cacheflush.h | 2 |
2 files changed, 25 insertions, 8 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index bbfc8e2466ab..97ec9e7d29d9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -26,7 +26,6 @@ within(unsigned long addr, unsigned long start, unsigned long end) * Flushing functions */ - /** * clflush_cache_range - flush a cache range with clflush * @addr: virtual start address @@ -35,13 +34,19 @@ within(unsigned long addr, unsigned long start, unsigned long end) * clflush is an unordered instruction which needs fencing with mfence * to avoid ordering issues. */ -void clflush_cache_range(void *addr, int size) +void clflush_cache_range(void *vaddr, unsigned int size) { - int i; + void *vend = vaddr + size - 1; mb(); - for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size) - clflush(addr+i); + + for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size) + clflush(vaddr); + /* + * Flush any possible final partial cacheline: + */ + clflush(vend); + mb(); } @@ -74,9 +79,13 @@ static void __cpa_flush_range(void *arg) __flush_tlb_all(); } -static void cpa_flush_range(unsigned long addr, int numpages) +static void cpa_flush_range(unsigned long start, int numpages) { + unsigned int i, level; + unsigned long addr; + BUG_ON(irqs_disabled()); + WARN_ON(PAGE_ALIGN(start) != start); on_each_cpu(__cpa_flush_range, NULL, 1, 1); @@ -86,7 +95,15 @@ static void cpa_flush_range(unsigned long addr, int numpages) * will cause all other CPUs to flush the same * cachelines: */ - clflush_cache_range((void *) addr, numpages * PAGE_SIZE); + for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) { + pte_t *pte = lookup_address(addr, &level); + + /* + * Only flush present addresses: + */ + if (pte && pte_present(*pte)) + clflush_cache_range((void *) addr, PAGE_SIZE); + } } /* diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 3e74aff90809..8dd8c5e3cc7f 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h @@ -42,7 +42,7 @@ int 
set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_np(unsigned long addr, int numpages); -void clflush_cache_range(void *addr, int size); +void clflush_cache_range(void *addr, unsigned int size); #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); |