From 8e1964a98920100f113ad26f78220ea706dbfa2b Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 20 Jan 2011 12:54:18 -0600
Subject: [PARISC] fix vmap flush/invalidate

On parisc, we never implemented invalidate_kernel_vmap_range() because
it was unnecessary for the xfs use case. However, we do need to
implement an invalidate for the opposite use case (which occurred in a
recent NFS change) where the user wants to read through the vmap range
and write via the kernel address. There's an additional complexity to
this in that if the page has no userspace mappings, it might have dirty
cache lines in the kernel (indicated by the PG_dcache_dirty bit). In
order to get full coherency, we need to flush these pages through the
kernel mapping before invalidating the vmap range.

Signed-off-by: James Bottomley
---
 arch/parisc/include/asm/cacheflush.h | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)
(limited to 'arch')

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f388a85bba11..7344e1d304af 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -37,6 +37,13 @@ void flush_cache_all_local(void);
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	flush_kernel_dcache_page_addr(page_address(page));
+}
+
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 /* vmap range flushes and invalidates. Architecturally, we don't need
@@ -50,6 +57,16 @@ static inline void flush_kernel_vmap_range(void *vaddr, int size)
 }
 static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
+	unsigned long start = (unsigned long)vaddr;
+	void *cursor = vaddr;
+
+	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+		struct page *page = vmalloc_to_page(cursor);
+
+		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+			flush_kernel_dcache_page(page);
+	}
+	flush_kernel_dcache_range_asm(start, start + size);
 }
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
@@ -98,13 +115,6 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 		flush_user_dcache_page(vmaddr);
 }
 
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-void flush_kernel_dcache_page_addr(void *addr);
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-	flush_kernel_dcache_page_addr(page_address(page));
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif
--
cgit v1.2.3


From 9804c9eaeacfe78651052c5ddff31099f60ef78c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 7 Feb 2011 19:28:01 +0100
Subject: [PARISC] fix per-cpu flag problem in the cpu affinity checkers

The CHECK_IRQ_PER_CPU() check is wrong: it should be checking
irq_to_desc(irq)->status, not just irq.
Signed-off-by: Thomas Gleixner
Cc: stable@kernel.org
Signed-off-by: James Bottomley
---
 arch/parisc/kernel/irq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index d7d94b845dc2..3948f1dd455a 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -108,7 +108,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 	int cpu_dest;
 
 	/* timer and ipi have to always be received on all CPUs */
-	if (CHECK_IRQ_PER_CPU(irq)) {
+	if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) {
 		/* Bad linux design decision. The mask has already
 		 * been set; we must reset it */
 		cpumask_setall(irq_desc[irq].affinity);
--
cgit v1.2.3
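
For context on the first patch, the aliasing pattern that
invalidate_kernel_vmap_range() exists to handle looks roughly like the
sketch below: data lands in the pages through their kernel (lowmem)
addresses, e.g. via DMA or a memcpy through page_address(), and is then
read back through a vmap() alias of the same pages, so the alias has to
be invalidated before the read. This is only an illustrative sketch of
that use case, not code from either patch or from the NFS change; the
helper name map_and_read_example() and the pages/npages parameters are
invented for the example.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

/*
 * Illustrative sketch only: map pages that are written through their
 * kernel addresses and read back through a vmap() alias.  The vmap
 * alias is invalidated before the read so the CPU does not return
 * stale cache lines for the range.
 */
static void *map_and_read_example(struct page **pages, unsigned int npages)
{
	void *vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);

	if (!vaddr)
		return NULL;

	/*
	 * ... the pages are filled via their kernel addresses here,
	 * e.g. by DMA or by a memcpy through page_address() ...
	 */

	/* Drop any cache lines covering the vmap alias of the pages. */
	invalidate_kernel_vmap_range(vaddr, npages << PAGE_SHIFT);

	return vaddr;	/* caller reads through vaddr, then vunmap()s it */
}

The parisc implementation added above also flushes any PG_dcache_dirty
pages through the kernel mapping inside invalidate_kernel_vmap_range()
itself, so a caller like this does not need to know about that
architecture-specific detail.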