path: root/arch/riscv
author     ShihPo Hung <shihpo.hung@sifive.com>  2019-06-17 06:26:17 +0200
committer  Paul Walmsley <paul.walmsley@sifive.com>  2019-06-17 12:44:44 +0200
commit     bf587caae305ae3b4393077fb22c98478ee55755 (patch)
tree       ab59e43380e7b8e32b79875eb26a5c27e5046957 /arch/riscv
parent     riscv: dts: add initial board data for the SiFive HiFive Unleashed (diff)
riscv: mm: synchronize MMU after pte change
Because RISC-V compliant implementations can cache invalid entries in the TLB, an SFENCE.VMA is necessary after changes to the page table. This patch adds an SFENCE.VMA for the vmalloc_fault path.

Signed-off-by: ShihPo Hung <shihpo.hung@sifive.com>
[paul.walmsley@sifive.com: reversed tab->whitespace conversion, wrapped comment lines]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: linux-riscv@lists.infradead.org
Cc: stable@vger.kernel.org
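For reference, the per-CPU flush used in the hunk below boils down to a single SFENCE.VMA instruction scoped to the faulting address. A minimal sketch, assuming the arch/riscv definition of local_flush_tlb_page() as it existed around this commit (not part of this patch):

    /* arch/riscv/include/asm/tlbflush.h (sketch, for illustration only) */
    static inline void local_flush_tlb_page(unsigned long addr)
    {
            /* Order the preceding pte write before any later implicit
             * address translation of 'addr' on this hart. */
            __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
    }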
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/mm/fault.c | 13 +++++++++++++
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index cec8be9e2d6a..5b72e60c5a6b 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -29,6 +29,7 @@
#include <asm/pgalloc.h>
#include <asm/ptrace.h>
+#include <asm/tlbflush.h>
/*
* This routine handles page faults. It determines the address and the
@@ -278,6 +279,18 @@ vmalloc_fault:
pte_k = pte_offset_kernel(pmd_k, addr);
if (!pte_present(*pte_k))
goto no_context;
+
+ /*
+ * The kernel assumes that TLBs don't cache invalid
+ * entries, but in RISC-V, SFENCE.VMA specifies an
+ * ordering constraint, not a cache flush; it is
+ * necessary even after writing invalid entries.
+ * Relying on flush_tlb_fix_spurious_fault would
+ * suffice, but the extra traps reduce
+ * performance. So, eagerly SFENCE.VMA.
+ */
+ local_flush_tlb_page(addr);
+
return;
}
}
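The comment above names flush_tlb_fix_spurious_fault() as an alternative. As a point of comparison (assumed from the generic header of that era, not part of this patch), the fallback definition only issues a per-page flush after a stale translation has already trapped, which is why the eager sfence.vma in vmalloc_fault avoids the extra fault round-trips:

    /* include/asm-generic/pgtable.h (sketch of the generic fallback) */
    #ifndef flush_tlb_fix_spurious_fault
    #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
    #endif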