path: root/arch/xtensa
author	Alexandre Ghiti <alexghiti@rivosinc.com>	2023-12-12 22:34:56 +0100
committer	Dennis Zhou <dennis@kernel.org>	2023-12-14 09:23:17 +0100
commit	7a92fc8b4d20680e4c20289a670d8fca2d1f2c1b (patch)
tree	f02aada180e46f0f74910d88d85b96c0f181061e /arch/xtensa
parent	Linux 6.7-rc4 (diff)
mm: Introduce flush_cache_vmap_early()
The pcpu setup when using the page allocator sets up a new vmalloc mapping very early in the boot process, so early that it cannot use flush_cache_vmap(), which may depend on structures not yet initialized (for example, riscv currently sends an IPI to flush the other CPUs' TLBs). But on some architectures flush_cache_vmap() must be called: on riscv, for example, some uarchs can cache invalid TLB entries, so the newly established mapping must be flushed to avoid taking an exception.

Fix this by introducing a new function, flush_cache_vmap_early(), which is called right after setting the new page table entry and before accessing the new mapping. This new function performs a local TLB flush on riscv and is a no-op on other architectures (same as today).

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
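Based on the commit message, the generic fallback and the riscv override might look roughly like the sketch below. The actual hunks live outside this arch/xtensa diffstat, and local_flush_tlb_kernel_range() is assumed from riscv's existing local-flush helpers rather than confirmed by this page:

	/*
	 * Generic fallback (sketch): a no-op, matching today's behaviour
	 * on architectures that need no early flush.
	 */
	#ifndef flush_cache_vmap_early
	static inline void flush_cache_vmap_early(unsigned long start,
						  unsigned long end)
	{
	}
	#endif

	/*
	 * riscv override (sketch): flush only the local hart's TLB, since
	 * an IPI-based flush cannot be used this early in boot.
	 * local_flush_tlb_kernel_range() is an assumption here.
	 */
	#define flush_cache_vmap_early(start, end) \
		local_flush_tlb_kernel_range(start, end)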
Diffstat (limited to 'arch/xtensa')
-rw-r--r--	arch/xtensa/include/asm/cacheflush.h | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 785a00ce83c1..38bcecb0e457 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -116,8 +116,9 @@ void flush_cache_page(struct vm_area_struct*,
 #define flush_cache_mm(mm)		flush_cache_all()
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
 
-#define flush_cache_vmap(start,end)	flush_cache_all()
-#define flush_cache_vunmap(start,end)	flush_cache_all()
+#define flush_cache_vmap(start,end)		flush_cache_all()
+#define flush_cache_vmap_early(start,end)	do { } while (0)
+#define flush_cache_vunmap(start,end)		flush_cache_all()
 
 void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_folio flush_dcache_folio
@@ -140,6 +141,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_dup_mm(mm)			do { } while (0)
 
 #define flush_cache_vmap(start,end)		do { } while (0)
+#define flush_cache_vmap_early(start,end)	do { } while (0)
 #define flush_cache_vunmap(start,end)		do { } while (0)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
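For context, the ordering the commit message requires in the early pcpu setup path looks roughly like this. This is a hedged sketch with a hypothetical helper name, not the literal mm/percpu.c change:

	void __init pcpu_populate_unit(unsigned long unit_addr, unsigned long size)
	{
		/* 1. Establish the new vmalloc-space mapping (page tables only). */
		pcpu_map_unit_pages(unit_addr, size);	/* hypothetical helper */

		/*
		 * 2. Flush before the first access. flush_cache_vmap() may send
		 * IPIs (e.g. on riscv), which is not possible this early in
		 * boot, so the local-only variant is used instead.
		 */
		flush_cache_vmap_early(unit_addr, unit_addr + size);

		/* 3. Only now is it safe to touch the new mapping. */
		memset((void *)unit_addr, 0, size);
	}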