author:    Will Deacon <will.deacon@arm.com>  2018-02-15 12:14:56 +0100
committer: Catalin Marinas <catalin.marinas@arm.com>  2018-02-16 19:13:57 +0100
commit:    20a004e7b017cce282a46ac5d02c2b9c6b9bb1fa
tree:      390bf8546ee581e54103e2bdc5876ac56cceefca /arch/arm64/mm/kasan_init.c
parent:    arm64: proc: Set PTE_NG for table entries to avoid traversing them twice
arm64: mm: Use READ_ONCE/WRITE_ONCE when accessing page tables
In many cases, page tables can be accessed concurrently by either another
CPU (due to things like fast gup) or by the hardware page table walker
itself, which may set access/dirty bits. In such cases, it is important
to use READ_ONCE/WRITE_ONCE when accessing page table entries so that
entries cannot be torn, merged or subject to apparent loss of coherence
due to compiler transformations.
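As a minimal illustration (not code from this patch), the difference between a
plain dereference and a READ_ONCE() access looks like the sketch below.
pte_valid() and pte_young() are the real arm64 helpers; the surrounding
function is hypothetical:

	/* Hypothetical helper, for illustration only; not part of this patch. */
	static bool pte_is_young_and_valid(pte_t *ptep)
	{
		/*
		 * With a plain '*ptep' the compiler may legally reload the
		 * entry for each predicate below, so the two checks could
		 * observe different values if the hardware walker updates
		 * the access bit in between.  READ_ONCE() forces a single,
		 * non-torn load; 'pte' is then a stable local snapshot.
		 */
		pte_t pte = READ_ONCE(*ptep);

		return pte_valid(pte) && pte_young(pte);
	}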
Whilst there are some scenarios where this cannot happen (e.g. pinned
kernel mappings for the linear region), the overhead of using
READ_ONCE/WRITE_ONCE everywhere is minimal and doing so makes the code an
awful lot easier to reason about. This patch consistently uses these macros
in the arch code, as well as explicitly namespacing pointers to page table
entries from the entries themselves by adopting a 'p' suffix for the former
(as is sometimes done elsewhere in the kernel source).
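Distilled from the kasan_init.c diff below, the naming convention and the
accompanying READ_ONCE() usage look like this (a sketch of the pattern, not
a verbatim hunk):

	/* Before: 'pmd' names a pointer; the entry is read with a plain load. */
	pmd_t *pmd;
	if (pmd_none(*pmd))
		/* ... */;

	/* After: 'pmdp' names the pointer; the entry is read exactly once. */
	pmd_t *pmdp;
	if (pmd_none(READ_ONCE(*pmdp)))
		/* ... */;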
Tested-by: Yury Norov <ynorov@caviumnetworks.com>
Tested-by: Richard Ruigrok <rruigrok@codeaurora.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/mm/kasan_init.c')
-rw-r--r--  arch/arm64/mm/kasan_init.c | 70
1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 6e02e6fb4c7b..dabfc1ecda3d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -44,92 +44,92 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 	return __pa(p);
 }
 
-static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
 				      bool early)
 {
-	if (pmd_none(*pmd)) {
+	if (pmd_none(READ_ONCE(*pmdp))) {
 		phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
 					     : kasan_alloc_zeroed_page(node);
-		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
 	}
 
-	return early ? pte_offset_kimg(pmd, addr)
-		     : pte_offset_kernel(pmd, addr);
+	return early ? pte_offset_kimg(pmdp, addr)
+		     : pte_offset_kernel(pmdp, addr);
 }
 
-static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
 				      bool early)
 {
-	if (pud_none(*pud)) {
+	if (pud_none(READ_ONCE(*pudp))) {
 		phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
 					     : kasan_alloc_zeroed_page(node);
-		__pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
 	}
 
-	return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
 }
 
-static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
 				      bool early)
 {
-	if (pgd_none(*pgd)) {
+	if (pgd_none(READ_ONCE(*pgdp))) {
 		phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
 					     : kasan_alloc_zeroed_page(node);
-		__pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
 	}
 
-	return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
 }
 
-static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
+	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
 
 	do {
 		phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
 					      : kasan_alloc_zeroed_page(node);
 		next = addr + PAGE_SIZE;
-		set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
-	} while (pte++, addr = next, addr != end && pte_none(*pte));
+		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
 }
 
-static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
+	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
 
 	do {
 		next = pmd_addr_end(addr, end);
-		kasan_pte_populate(pmd, addr, next, node, early);
-	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
+		kasan_pte_populate(pmdp, addr, next, node, early);
+	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
 }
 
-static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
+	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
 
 	do {
 		next = pud_addr_end(addr, end);
-		kasan_pmd_populate(pud, addr, next, node, early);
-	} while (pud++, addr = next, addr != end && pud_none(*pud));
+		kasan_pmd_populate(pudp, addr, next, node, early);
+	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
 }
 
 static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
 				      int node, bool early)
 {
 	unsigned long next;
-	pgd_t *pgd;
+	pgd_t *pgdp;
 
-	pgd = pgd_offset_k(addr);
+	pgdp = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		kasan_pud_populate(pgd, addr, next, node, early);
-	} while (pgd++, addr = next, addr != end);
+		kasan_pud_populate(pgdp, addr, next, node, early);
+	} while (pgdp++, addr = next, addr != end);
 }
 
 /* The early shadow maps everything to a single page of zeroes */
@@ -155,14 +155,14 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
  */
 void __init kasan_copy_shadow(pgd_t *pgdir)
 {
-	pgd_t *pgd, *pgd_new, *pgd_end;
+	pgd_t *pgdp, *pgdp_new, *pgdp_end;
 
-	pgd = pgd_offset_k(KASAN_SHADOW_START);
-	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
-	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+	pgdp = pgd_offset_k(KASAN_SHADOW_START);
+	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
+	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
 	do {
-		set_pgd(pgd_new, *pgd);
-	} while (pgd++, pgd_new++, pgd != pgd_end);
+		set_pgd(pgdp_new, READ_ONCE(*pgdp));
+	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
 }
 
 static void __init clear_pgds(unsigned long start,
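The WRITE_ONCE() half of the title does not appear in this kasan_init.c
diff; it comes from the same commit's changes to the arm64 page table
setters in asm/pgtable.h. A sketch of the pattern there (barrier and
maintenance details elided, so not a verbatim copy):

	/*
	 * Sketch of the writer side from the same commit (asm/pgtable.h,
	 * outside this diffstat); cache/TLB maintenance code elided.
	 */
	static inline void set_pte(pte_t *ptep, pte_t pte)
	{
		WRITE_ONCE(*ptep, pte);
		/* ... barriers for kernel mappings elided ... */
	}

Because the store inside set_pte() is already a single, non-torn access,
the kasan loops above only needed their reads converted to READ_ONCE() and
their pointer names given the 'p' suffix.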