author		Mark Rutland <mark.rutland@arm.com>	2019-06-10 14:41:07 +0200
committer	Catalin Marinas <catalin.marinas@arm.com>	2019-06-10 14:55:34 +0200
commit		9b604722059039a1a3ff69fb8dfd024264046024 (patch)
tree		1e4a434e73e4d583d935a5b3e34825c0dd0fb86a /arch/arm64
parent		arm64/mm: Refactor __do_page_fault() (diff)
arm64: mm: avoid redundant READ_ONCE(*ptep)
In set_pte_at(), we read the old pte value so that it can be passed
into checks for racy hw updates. These checks are only performed for
CONFIG_DEBUG_VM, and the value is not used otherwise.

Since we read the pte value with READ_ONCE(), the compiler cannot elide
the redundant read for !CONFIG_DEBUG_VM kernels.

Let's ameliorate matters by moving the read and the checks into a
helper, __check_racy_pte_update(), which only performs the read when
the value will be used. This also allows us to reformat the conditions
for clarity.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
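The trick the patch relies on generalizes: when a READ_ONCE() feeds only
debug-time checks, placing the IS_ENABLED() bail-out before the read lets the
compiler discard the volatile load entirely once the checks are configured
out. Below is a minimal, self-contained sketch of that idea in plain C, not
kernel code: the names (check_racy_update(), set_slot(), DEBUG_CHECKS) and the
simplified stand-ins for the kernel's READ_ONCE()/IS_ENABLED() macros are
hypothetical and only approximate the real implementations.

#include <stdio.h>

/* Simplified, hypothetical stand-ins for the kernel's macros. */
#define READ_ONCE(x)    (*(volatile typeof(x) *)&(x))
#define IS_ENABLED(opt) (opt)

#define DEBUG_CHECKS    0       /* flip to 1 to compile the checks in */

/*
 * Debug-only sanity check. Because the bail-out comes before the
 * READ_ONCE(), the volatile load is dead code in DEBUG_CHECKS=0 builds
 * and the compiler drops it; if the read came first, it would have to
 * stay, even though the value is never used.
 */
static inline void check_racy_update(unsigned long *slot, unsigned long new_val)
{
        unsigned long old_val;

        if (!IS_ENABLED(DEBUG_CHECKS))
                return;

        old_val = READ_ONCE(*slot);
        if (old_val > new_val)
                fprintf(stderr, "suspicious update: %lx -> %lx\n",
                        old_val, new_val);
}

static void set_slot(unsigned long *slot, unsigned long new_val)
{
        check_racy_update(slot, new_val);
        *slot = new_val;
}

int main(void)
{
        unsigned long slot = 2;

        set_slot(&slot, 1);     /* warns only when DEBUG_CHECKS is 1 */
        printf("%lu\n", slot);
        return 0;
}

This mirrors the shape of __check_racy_pte_update() in the diff below: the old
code read *ptep unconditionally and only then tested IS_ENABLED(CONFIG_DEBUG_VM),
so the volatile access had to survive, whereas the new helper returns before
the read on !CONFIG_DEBUG_VM kernels.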
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/pgtable.h	47
1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6478dd121228..2023eab19d73 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -246,29 +246,42 @@ extern void __sync_icache_dcache(pte_t pteval);
  *
  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep, pte_t pte)
+
+static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
+                                           pte_t pte)
 {
        pte_t old_pte;
 
-       if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
-               __sync_icache_dcache(pte);
+       if (!IS_ENABLED(CONFIG_DEBUG_VM))
+               return;
+
+       old_pte = READ_ONCE(*ptep);
+
+       if (!pte_valid(old_pte) || !pte_valid(pte))
+               return;
+       if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
+               return;
 
        /*
-        * If the existing pte is valid, check for potential race with
-        * hardware updates of the pte (ptep_set_access_flags safely changes
-        * valid ptes without going through an invalid entry).
+        * Check for potential race with hardware updates of the pte
+        * (ptep_set_access_flags safely changes valid ptes without going
+        * through an invalid entry).
         */
-       old_pte = READ_ONCE(*ptep);
-       if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
-           (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
-               VM_WARN_ONCE(!pte_young(pte),
-                            "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
-                            __func__, pte_val(old_pte), pte_val(pte));
-               VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
-                            "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
-                            __func__, pte_val(old_pte), pte_val(pte));
-       }
+       VM_WARN_ONCE(!pte_young(pte),
+                    "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+                    __func__, pte_val(old_pte), pte_val(pte));
+       VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
+                    "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+                    __func__, pte_val(old_pte), pte_val(pte));
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+                              pte_t *ptep, pte_t pte)
+{
+       if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
+               __sync_icache_dcache(pte);
+
+       __check_racy_pte_update(mm, ptep, pte);
 
        set_pte(ptep, pte);
 }