commit 974b9b2c68f3d35a65e80af9657fe378d2439b60
Author:    Mike Rapoport <rppt@linux.ibm.com>  2020-06-09 06:33:10 +0200
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2020-06-09 18:39:14 +0200
Tree:      67332a4308b56498008ba8687b99c2ca26215866 /arch/arm64
Parent:    mm: pgtable: add shortcuts for accessing kernel PMD and PTE
mm: consolidate pte_index() and pte_offset_*() definitions
All architectures define pte_index() as
(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)
and all architectures define pte_offset_kernel() as an entry in the
array of PTEs indexed by pte_index().
For most architectures, the pte_offset_kernel() implementation relies
on the availability of pmd_page_vaddr(), which converts a PMD entry
value to the virtual address of the page containing the PTE array.
Let's move x86 definitions of the PTE accessors to the generic place in
<linux/pgtable.h> and then simply drop the respective definitions from the
other architectures.
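The consolidated helpers in <linux/pgtable.h> then look roughly like
this (a sketch of the pattern rather than the verbatim hunk; the
#ifndef guard is what lets an architecture substitute its own
version):

	static inline unsigned long pte_index(unsigned long address)
	{
		/* index of the entry controlling 'address' within the PTE page */
		return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	}

	#ifndef pte_offset_kernel
	static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
	{
		/* virtual address of the PTE array, plus the index for 'address' */
		return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
	}
	#define pte_offset_kernel pte_offset_kernel
	#endif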
Architectures that did not previously provide pmd_page_vaddr() are
updated to define it.
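For architectures whose page tables live in the linear map this is a
one-liner; the arm64 hunk below adds exactly this shape, converting
the PMD entry to a physical address and back to a virtual one:

	static inline unsigned long pmd_page_vaddr(pmd_t pmd)
	{
		/* physical address of the PTE page, mapped through the linear map */
		return (unsigned long)__va(pmd_page_paddr(pmd));
	}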
The generic implementation of pte_offset_kernel() can be overridden by
an architecture; alpha makes use of this because it has special
ordering requirements for its version of pte_offset_kernel().
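The override uses the same #ifndef hook: alpha defines
pte_offset_kernel() itself so it can insert a read barrier between
loading the PMD entry and dereferencing the dependent PTE pointer. A
sketch of the shape, assuming alpha's usual smp_rmb() placement (the
authoritative body lives in arch/alpha/include/asm/pgtable.h):

	static inline pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
	{
		pte_t *ret = (pte_t *)pmd_page_vaddr(*dir) + pte_index(address);
		smp_rmb();	/* order the *dir load before the dependent PTE access */
		return ret;
	}
	#define pte_offset_kernel pte_offset_kernel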
[rppt@linux.ibm.com: v2]
Link: http://lkml.kernel.org/r/20200514170327.31389-11-rppt@kernel.org
[rppt@linux.ibm.com: update]
Link: http://lkml.kernel.org/r/20200514170327.31389-12-rppt@kernel.org
[rppt@linux.ibm.com: update]
Link: http://lkml.kernel.org/r/20200514170327.31389-13-rppt@kernel.org
[akpm@linux-foundation.org: fix x86 warning]
[sfr@canb.auug.org.au: fix powerpc build]
Link: http://lkml.kernel.org/r/20200607153443.GB738695@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-10-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm64')

 arch/arm64/include/asm/pgtable.h | 36
 arch/arm64/kernel/hibernate.c    |  4
 arch/arm64/mm/kasan_init.c       |  2
 arch/arm64/mm/mmu.c              |  8

 4 files changed, 21 insertions, 29 deletions
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 437c92ab2081..6dbd267ab931 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -506,15 +506,13 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 	return __pmd_to_phys(pmd);
 }
 
-static inline void pte_unmap(pte_t *pte) { }
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long)__va(pmd_page_paddr(pmd));
+}
 
 /* Find an entry in the third-level page table. */
-#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
 #define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
-#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
 
 #define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
 #define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
@@ -568,11 +566,13 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 	return __pud_to_phys(pud);
 }
 
-/* Find an entry in the second-level page table. */
-#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	return (unsigned long)__va(pud_page_paddr(pud));
+}
 
+/* Find an entry in the second-level page table. */
 #define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
-#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
 
 #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
 #define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
@@ -626,11 +626,13 @@ static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
 	return __p4d_to_phys(p4d);
 }
 
-/* Find an entry in the frst-level page table. */
-#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+static inline unsigned long p4d_page_vaddr(p4d_t p4d)
+{
+	return (unsigned long)__va(p4d_page_paddr(p4d));
+}
 
+/* Find an entry in the frst-level page table. */
 #define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
-#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
 
 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
 #define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
@@ -657,16 +659,6 @@ static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
 
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))
-
-#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
-
 #define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
 #define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 668903c94a2b..68e14152d6e9 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -188,7 +188,7 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
 	pmd_t *pmdp;
 	pte_t *ptep;
 
-	pgdp = pgd_offset_raw(trans_pgd, dst_addr);
+	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
 	if (pgd_none(READ_ONCE(*pgdp))) {
 		pudp = (void *)get_safe_page(GFP_ATOMIC);
 		if (!pudp)
@@ -490,7 +490,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
 	unsigned long addr = start;
 	pgd_t *src_pgdp = pgd_offset_k(start);
 
-	dst_pgdp = pgd_offset_raw(dst_pgdp, start);
+	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(READ_ONCE(*src_pgdp)))
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index ca77eb8a0ccd..7291b26ce788 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -190,7 +190,7 @@ void __init kasan_copy_shadow(pgd_t *pgdir)
 
 	pgdp = pgd_offset_k(KASAN_SHADOW_START);
 	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
-	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
 	do {
 		set_pgd(pgdp_new, READ_ONCE(*pgdp));
 	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e7fbc6275329..990929c8837e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -341,7 +341,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 				 int flags)
 {
 	unsigned long addr, end, next;
-	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
+	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
 
 	/*
 	 * If the virtual and physical address don't have the same offset
@@ -663,13 +663,13 @@ static void __init map_kernel(pgd_t *pgdp)
 			   &vmlinux_initdata, 0, VM_NO_GUARD);
 	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL,
 			   &vmlinux_data, 0, 0);
 
-	if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
+	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
 		/*
 		 * The fixmap falls in a separate pgd to the kernel, and doesn't
 		 * live in the carveout for the swapper_pg_dir. We can simply
 		 * re-use the existing dir for the fixmap.
 		 */
-		set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
+		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
 			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
 	} else if (CONFIG_PGTABLE_LEVELS > 3) {
 		pgd_t *bm_pgdp;
@@ -682,7 +682,7 @@ static void __init map_kernel(pgd_t *pgdp)
 		 * entry instead.
 		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
+		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
 		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
 		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
 		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));