author | John David Anglin <dave.anglin@bell.net> | 2022-03-19 20:04:15 +0100 |
---|---|---|
committer | Helge Deller <deller@gmx.de> | 2022-03-21 13:30:54 +0100 |
commit | 53d862fac4a09b9c56cca0433fa9de5732fd05a1 (patch) | |
tree | 1b02d6ee02e1669bf73edc090b7085718ab53f8a /arch/parisc/kernel/cache.c | |
parent | parisc: Avoid flushing cache on cache-less machines (diff) | |
parisc: Fix invalidate/flush vmap routines
Cache move-in for virtual accesses is controlled by the TLB. Thus,
we must generally purge TLB entries before flushing. The flush routines
must use TLB entries that inhibit cache move-in.
V2: Load physical address prior to flushing TLB. In flush_cache_page,
flush TLB when flushing and purging.
V3: Don't flush when start equals end.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
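
To make the ordering described above concrete, here is a small illustrative sketch (not part of the patch) of the per-page sequence: translate the address while its TLB entry still exists, purge the entry so no further cache move-in can occur, then flush by physical address. It reuses the parisc-internal helpers visible in the diff below (lpa(), purge_tlb_start()/purge_tlb_end(), pdtlb(), flush_dcache_page_asm()); the wrapper function itself is hypothetical and assumes the context of arch/parisc/kernel/cache.c.

```c
/*
 * Illustrative sketch only (hypothetical helper, assuming the context of
 * arch/parisc/kernel/cache.c): flush one page of a kernel vmap range.
 * The physical address must be looked up with lpa() while the translation
 * still exists; the TLB entry is then purged so no new cache move-in can
 * occur, and only afterwards are the cache lines flushed via the physical
 * address.
 */
static void flush_one_vmap_page(unsigned long vaddr)
{
	unsigned long flags, physaddr;

	physaddr = lpa(vaddr);			/* translate before purging the TLB */
	purge_tlb_start(flags);			/* serialize TLB purges, IRQs off */
	pdtlb(SR_KERNEL, vaddr);		/* purge the data TLB entry for this page */
	purge_tlb_end(flags);
	flush_dcache_page_asm(physaddr, vaddr);	/* flush the page by physical address */
}
```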
Diffstat (limited to 'arch/parisc/kernel/cache.c')
-rw-r--r-- | arch/parisc/kernel/cache.c | 24 |
1 file changed, 19 insertions, 5 deletions
```diff
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index d41fee3a9874..456e879d34a8 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -611,8 +611,8 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 	unsigned long pfn)
 {
 	if (pfn_valid(pfn)) {
+		flush_tlb_page(vma, vmaddr);
 		if (likely(vma->vm_mm->context.space_id)) {
-			flush_tlb_page(vma, vmaddr);
 			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 		} else {
 			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
@@ -624,6 +624,7 @@ void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
+	unsigned long flags, physaddr;
 
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
@@ -632,8 +633,14 @@ void flush_kernel_vmap_range(void *vaddr, int size)
 		return;
 	}
 
-	flush_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	while (start < end) {
+		physaddr = lpa(start);
+		purge_tlb_start(flags);
+		pdtlb(SR_KERNEL, start);
+		purge_tlb_end(flags);
+		flush_dcache_page_asm(physaddr, start);
+		start += PAGE_SIZE;
+	}
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
@@ -641,6 +648,7 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
+	unsigned long flags, physaddr;
 
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
@@ -649,7 +657,13 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 		return;
 	}
 
-	purge_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	while (start < end) {
+		physaddr = lpa(start);
+		purge_tlb_start(flags);
+		pdtlb(SR_KERNEL, start);
+		purge_tlb_end(flags);
+		purge_dcache_page_asm(physaddr, start);
+		start += PAGE_SIZE;
+	}
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
```
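
For context, flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are the generic kernel hooks a driver calls around I/O on a vmalloc/vmap alias (see Documentation/core-api/cachetlb.rst). A hedged usage sketch follows; the function name and buffer handling are made up, and it assumes the declarations come in via linux/highmem.h:

```c
#include <linux/errno.h>
#include <linux/highmem.h>	/* flush_kernel_vmap_range(), invalidate_kernel_vmap_range() */
#include <linux/string.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical driver snippet (not from the patch): bounce data through a
 * vmalloc'ed buffer that a device accesses via its physical pages.
 */
static int example_vmap_io(int len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return -ENOMEM;

	memset(buf, 0xaa, len);			/* CPU writes via the vmap alias */
	flush_kernel_vmap_range(buf, len);	/* write back before the device reads the pages */

	/* ... device DMA to/from the physical pages backing buf ... */

	invalidate_kernel_vmap_range(buf, len);	/* drop stale lines before the CPU reads */

	vfree(buf);
	return 0;
}
```

With this patch applied, both calls walk the range page by page, purging the kernel TLB entry with pdtlb(SR_KERNEL, ...) before touching the cache so no new cache move-in can occur for the alias; the while (start < end) loop also means a zero-length range (start equal to end, the V3 note) flushes nothing.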