author     Steve Capper <steve.capper@linaro.org>          2014-10-10 00:29:23 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-10 04:26:01 +0200
commit     5e5f6dc10546f5c03bc572e3ba3089af30c66e2d (patch)
tree       05c84c5be22dcb0bb752428dd4c726a0029bca8c /arch
parent     arm: mm: enable RCU fast_gup (diff)
arm64: mm: enable HAVE_RCU_TABLE_FREE logic
In order to implement fast_get_user_pages we need to ensure that the
page table walker is protected from page table pages being freed from
under it.

This patch enables HAVE_RCU_TABLE_FREE; any page table pages belonging
to address spaces with multiple users will be freed via
call_rcu_sched().  This means that disabling interrupts will block the
free and protect the fast gup page walker.

Signed-off-by: Steve Capper <steve.capper@linaro.org>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
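To make the interrupt-disabling argument concrete, here is a minimal
sketch of the pattern a fast gup walker relies on.  This is an
illustration only, not the kernel's actual get_user_pages_fast()
implementation; walk_user_page_tables() and do_lockless_walk() are
hypothetical names.

	/*
	 * Minimal sketch (hypothetical helpers) of the guarantee above:
	 * page table pages are freed via call_rcu_sched(), so the free is
	 * deferred until every CPU passes through an RCU-sched quiescent
	 * state.  A CPU walking with interrupts disabled cannot reach such
	 * a quiescent state, so any table page it can still observe stays
	 * allocated for the duration of the walk.
	 */
	static int walk_user_page_tables(struct mm_struct *mm, unsigned long addr)
	{
		unsigned long flags;
		int ret;

		local_irq_save(flags);			/* blocks the deferred free */
		ret = do_lockless_walk(mm, addr);	/* hypothetical walker body */
		local_irq_restore(flags);

		return ret;
	}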
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/Kconfig		|  1 +
-rw-r--r--	arch/arm64/include/asm/tlb.h	| 20 +++++++++++++++++---
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e96cbe84d5ae..1ffd9a05206b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -57,6 +57,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 62731ef9749a..a82c0c5c8b52 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -23,6 +23,20 @@
 
 #include <asm-generic/tlb.h>
 
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * There's three ways the TLB shootdown code is used:
  * 1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
@@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
@@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
 
@@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pudp));
+	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
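For reference, the generic machinery that the new __tlb_remove_table()
hook plugs into looks roughly like the following.  This is an abridged
sketch of the CONFIG_HAVE_RCU_TABLE_FREE path in mm/memory.c around
this release, not a verbatim copy:

	static void tlb_remove_table_rcu(struct rcu_head *head)
	{
		struct mmu_table_batch *batch =
			container_of(head, struct mmu_table_batch, rcu);
		int i;

		/* Grace period over: hand each table page to the arch hook. */
		for (i = 0; i < batch->nr; i++)
			__tlb_remove_table(batch->tables[i]);

		free_page((unsigned long)batch);
	}

	static void tlb_table_flush(struct mmu_gather *tlb)
	{
		struct mmu_table_batch **batch = &tlb->batch;

		if (*batch) {
			/* Defer the free past an RCU-sched grace period. */
			call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
			*batch = NULL;
		}
	}

Note that the generic tlb_remove_table() calls __tlb_remove_table()
immediately when the mm has fewer than two users, since a single-user
address space cannot have a concurrent fast gup walker; that is why the
commit message restricts the call_rcu_sched() behaviour to address
spaces with multiple users.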