author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-05 21:39:30 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-05 21:39:30 +0200
commit     7ae77150d94d3b535c7b85e6b3647113095e79bf (patch)
tree       90fe894e7efd92898e813d88acfd4611d79be969 /arch/powerpc/include/asm/nohash
parent     Merge tag 'modules-for-v5.8' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent     Merge branch 'topic/ppc-kvm' into next (diff)
Merge tag 'powerpc-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:

 - Support for userspace to send requests directly to the on-chip GZIP
   accelerator on Power9.

 - Rework of our lockless page table walking (__find_linux_pte()) to
   make it safe against parallel page table manipulations without
   relying on an IPI for serialisation.

 - A series of fixes & enhancements to make our machine check handling
   more robust.

 - Lots of plumbing to add support for "prefixed" (64-bit) instructions
   on Power10.

 - Support for using huge pages for the linear mapping on 8xx (32-bit).

 - Remove obsolete Xilinx PPC405/PPC440 support, and an associated
   sound driver.

 - Removal of some obsolete 40x platforms and associated cruft.

 - Initial support for booting on Power10.

 - Lots of other small features, cleanups & fixes.

Thanks to: Alexey Kardashevskiy, Alistair Popple, Andrew Donnellan,
Andrey Abramov, Aneesh Kumar K.V, Balamuruhan S, Bharata B Rao, Bulent
Abali, Cédric Le Goater, Chen Zhou, Christian Zigotzky, Christophe
JAILLET, Christophe Leroy, Dmitry Torokhov, Emmanuel Nicolet, Erhard F.,
Gautham R. Shenoy, Geoff Levand, George Spelvin, Greg Kurz, Gustavo
A. R. Silva, Gustavo Walbon, Haren Myneni, Hari Bathini, Joel Stanley,
Jordan Niethe, Kajol Jain, Kees Cook, Leonardo Bras, Madhavan
Srinivasan, Mahesh Salgaonkar, Markus Elfring, Michael Neuling, Michal
Simek, Nathan Chancellor, Nathan Lynch, Naveen N. Rao, Nicholas Piggin,
Oliver O'Halloran, Paul Mackerras, Pingfan Liu, Qian Cai, Ram Pai,
Raphael Moreira Zinsly, Ravi Bangoria, Sam Bobroff, Sandipan Das, Segher
Boessenkool, Stephen Rothwell, Sukadev Bhattiprolu, Tyrel Datwyler,
Wolfram Sang, Xiongfeng Wang.

* tag 'powerpc-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (299 commits)
  powerpc/pseries: Make vio and ibmebus initcalls pseries specific
  cxl: Remove dead Kconfig options
  powerpc: Add POWER10 architected mode
  powerpc/dt_cpu_ftrs: Add MMA feature
  powerpc/dt_cpu_ftrs: Enable Prefixed Instructions
  powerpc/dt_cpu_ftrs: Advertise support for ISA v3.1 if selected
  powerpc: Add support for ISA v3.1
  powerpc: Add new HWCAP bits
  powerpc/64s: Don't set FSCR bits in INIT_THREAD
  powerpc/64s: Save FSCR to init_task.thread.fscr after feature init
  powerpc/64s: Don't let DT CPU features set FSCR_DSCR
  powerpc/64s: Don't init FSCR_DSCR in __init_FSCR()
  powerpc/32s: Fix another build failure with CONFIG_PPC_KUAP_DEBUG
  powerpc/module_64: Use special stub for _mcount() with -mprofile-kernel
  powerpc/module_64: Simplify check for -mprofile-kernel ftrace relocations
  powerpc/module_64: Consolidate ftrace code
  powerpc/32: Disable KASAN with pages bigger than 16k
  powerpc/uaccess: Don't set KUEP by default on book3s/32
  powerpc/uaccess: Don't set KUAP by default on book3s/32
  powerpc/8xx: Reduce time spent in allow_user_access() and friends
  ...
Diffstat (limited to 'arch/powerpc/include/asm/nohash')
-rw-r--r--  arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h  |  32
-rw-r--r--  arch/powerpc/include/asm/nohash/32/mmu-8xx.h      |  90
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h      | 120
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pte-40x.h      |  23
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pte-8xx.h      |   4
-rw-r--r--  arch/powerpc/include/asm/nohash/32/slice.h        |  20
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h      |  28
8 files changed, 94 insertions(+), 227 deletions(-)
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index a46616937d20..e752a5807a59 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -13,13 +13,13 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
- return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+ return PAGE_SHIFT_8M;
}
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned int pdshift)
{
- unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+ unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
return hugepd_page(hpd) + idx;
}
@@ -32,8 +32,12 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
- *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K));
+ *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
+static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
}
static inline int check_and_get_huge_psize(int shift)
@@ -41,4 +45,24 @@ static inline int check_and_get_huge_psize(int shift)
return shift_to_mmu_psize(shift);
}
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 1);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
+ unsigned long set = pte_val(pte_wrprotect(__pte(0)));
+
+ pte_update(mm, addr, ptep, clr, set, 1);
+}
+
#endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */
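[Note: the clr/set pair in the new huge_ptep_set_wrprotect() above is derived by probing pte_wrprotect() rather than hard-coding flag bits: feeding it an all-ones PTE reveals which bits it clears, and feeding it an all-zeroes PTE reveals which bits it sets, so pte_update() can replay the same transform on the live entry. A minimal standalone C sketch of the trick, using an invented _PAGE_RW layout rather than the real 8xx one:

#include <assert.h>
#include <stdint.h>

#define _PAGE_RW 0x100u  /* illustrative flag position only */

typedef uint32_t pte_basic_t;

/* toy wrprotect: clears the write bit, sets nothing */
static pte_basic_t pte_wrprotect(pte_basic_t pte)
{
    return pte & ~_PAGE_RW;
}

int main(void)
{
    pte_basic_t clr = ~pte_wrprotect(~0u);  /* bits wrprotect clears: _PAGE_RW */
    pte_basic_t set = pte_wrprotect(0u);    /* bits wrprotect sets: none here */

    assert(clr == _PAGE_RW && set == 0);
    return 0;
}
]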
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 76af5b0cb16e..1d9ac0f9c794 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -19,7 +19,6 @@
#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MI_RESETVAL 0x00000000 /* Value of register at reset */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
@@ -37,16 +36,16 @@
* Therefore, we define 2 APG groups. lsb is _PMD_USER
* 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MI_APG_INIT 0x4fffffff
+#define MI_APG_INIT 0x40000000
/*
* 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MI_APG_KUEP 0x6fffffff
+#define MI_APG_KUEP 0x60000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -95,7 +94,6 @@
#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MD_RESETVAL 0x04000000 /* Value of register at reset */
#define SPRN_M_CASID 793 /* Address space ID (context) to match */
#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
@@ -117,16 +115,16 @@
* Therefore, we define 2 APG groups. lsb is _PMD_USER
* 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MD_APG_INIT 0x4fffffff
+#define MD_APG_INIT 0x40000000
/*
* 0 => No user => 01 (all accesses performed according to page definition)
* 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MD_APG_KUAP 0x6fffffff
+#define MD_APG_KUAP 0x60000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -178,12 +176,6 @@
*/
#define SPRN_M_TW 799
-#ifdef CONFIG_PPC_MM_SLICES
-#include <asm/nohash/32/slice.h>
-#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1))
-#define LOW_SLICE_ARRAY_SZ SLICE_ARRAY_SIZE
-#endif
-
#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize MMU_PAGE_4K
#elif defined(CONFIG_PPC_16K_PAGES)
@@ -201,71 +193,15 @@
#include <linux/mmdebug.h>
-struct slice_mask {
- u64 low_slices;
- DECLARE_BITMAP(high_slices, 0);
-};
+void mmu_pin_tlb(unsigned long top, bool readonly);
typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
-#ifdef CONFIG_PPC_MM_SLICES
- u16 user_psize; /* page size index */
- unsigned char low_slices_psize[SLICE_ARRAY_SIZE];
- unsigned char high_slices_psize[0];
- unsigned long slb_addr_limit;
- struct slice_mask mask_base_psize; /* 4k or 16k */
- struct slice_mask mask_512k;
- struct slice_mask mask_8m;
-#endif
void *pte_frag;
} mm_context_t;
-#ifdef CONFIG_PPC_MM_SLICES
-static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
-{
- return ctx->user_psize;
-}
-
-static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
-{
- ctx->user_psize = user_psize;
-}
-
-static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
-{
- return ctx->low_slices_psize;
-}
-
-static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
-{
- return ctx->high_slices_psize;
-}
-
-static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
-{
- return ctx->slb_addr_limit;
-}
-
-static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
-{
- ctx->slb_addr_limit = limit;
-}
-
-static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
-{
- if (psize == MMU_PAGE_512K)
- return &ctx->mask_512k;
- if (psize == MMU_PAGE_8M)
- return &ctx->mask_8m;
-
- BUG_ON(psize != mmu_virtual_psize);
-
- return &ctx->mask_base_psize;
-}
-#endif /* CONFIG_PPC_MM_SLICE */
-
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
@@ -304,13 +240,7 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
}
/* patch sites */
-extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
-extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
-extern s32 patch__fixupdar_linmem_top;
-extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
-
-extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
-extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
+extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
#endif /* !__ASSEMBLY__ */
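[Note: the new MI_APG_INIT/MD_APG_INIT value of 0x40000000 follows directly from the comments above: the M_AP registers hold sixteen 2-bit access-protection-group fields, group 0 in the two most-significant bits, and groups 2-15 are now left at zero instead of 0b11. A sketch that reproduces the constants; apg_pack() is invented for illustration and is not a kernel helper:

#include <assert.h>
#include <stdint.h>

/* Pack sixteen 2-bit APG fields, group 0 first, into a 32-bit M_AP value. */
static uint32_t apg_pack(const uint8_t apg[16])
{
    uint32_t val = 0;

    for (int i = 0; i < 16; i++)
        val |= (uint32_t)(apg[i] & 0x3) << (30 - 2 * i);
    return val;
}

int main(void)
{
    uint8_t init[16] = { 0x1, 0x0 };  /* kernel => 01, user => 00, rest unused */
    uint8_t kuep[16] = { 0x1, 0x2 };  /* kernel => 01, user => 10 (swapped defs) */

    assert(apg_pack(init) == 0x40000000);  /* MI_APG_INIT / MD_APG_INIT */
    assert(apg_pack(kuep) == 0x60000000);  /* MI_APG_KUEP / MD_APG_KUAP */
    return 0;
}
]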
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 3d0bc99dd520..af7f13cf90cf 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -8,7 +8,6 @@
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
-#include <asm/asm-405.h>
#ifdef CONFIG_44x
extern int icache_44x_need_flush;
@@ -109,13 +108,13 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#ifdef CONFIG_KASAN_VMALLOC
-#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
@@ -165,7 +164,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#ifndef __ASSEMBLY__
#define pte_clear(mm, addr, ptep) \
- do { pte_update(ptep, ~0, 0); } while (0)
+ do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
@@ -205,6 +204,12 @@ static inline void pmd_clear(pmd_t *pmdp)
}
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/*
* PTE updates. This function is called whenever an existing
@@ -220,66 +225,42 @@ static inline void pmd_clear(pmd_t *pmdp)
* that an executable user mapping was modified, which is needed
* to properly flush the virtually tagged instruction cache of
* those implementations.
+ *
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if it was
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
*/
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+#ifdef CONFIG_PPC_8xx
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__("\
-1: lwarx %0,0,%3\n\
- andc %1,%0,%4\n\
- or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
-" stwcx. %1,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
- unsigned long old = pte_val(*p);
- unsigned long new = (old & ~clr) | set;
-
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
- p->pte = p->pte1 = p->pte2 = p->pte3 = new;
-#else
- *p = __pte(new);
-#endif
-#endif /* !PTE_ATOMIC_UPDATES */
+ pte_basic_t *entry = &p->pte;
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+ int num, i;
+ pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
+
+ if (!huge)
+ num = PAGE_SIZE / SZ_4K;
+ else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) != _PMD_PAGE_8M)
+ num = SZ_512K / SZ_4K;
+ else
+ num = 1;
+
+ for (i = 0; i < num; i++, entry++, new += SZ_4K)
+ *entry = new;
-#ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
- icache_44x_need_flush = 1;
-#endif
return old;
}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+#else
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long long old;
- unsigned long tmp;
-
- __asm__ __volatile__("\
-1: lwarx %L0,0,%4\n\
- lwzx %0,0,%3\n\
- andc %1,%L0,%5\n\
- or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
-" stwcx. %1,0,%4\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
- unsigned long long old = pte_val(*p);
- *p = __pte((old & ~(unsigned long long)clr) | set);
-#endif /* !PTE_ATOMIC_UPDATES */
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+ *p = __pte(new);
#ifdef CONFIG_44x
if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
@@ -287,23 +268,24 @@ static inline unsigned long long pte_update(pte_t *p,
#endif
return old;
}
-#endif /* CONFIG_PTE_64BIT */
+#endif
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
- old = pte_update(ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(ptep, ~0, 0));
+ return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -313,7 +295,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
unsigned long set = pte_val(pte_wrprotect(__pte(0)));
- pte_update(ptep, clr, set);
+ pte_update(mm, addr, ptep, clr, set, 0);
}
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -325,8 +307,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
unsigned long set = pte_val(entry) & pte_val(pte_set);
unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
- pte_update(ptep, clr, set);
+ pte_update(vma->vm_mm, address, ptep, clr, set, huge);
flush_tlb_page(vma, address);
}
@@ -358,13 +341,6 @@ static inline int pte_young(pte_t pte)
pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
/* Find an entry in the third-level page table.. */
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
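[Note: the replication loop in the new 8xx pte_update() above is the crux of the 16k/512k support: one Linux PTE is fanned out over every 4k hardware slot it covers, with the value stepped by 4k each time so the RPNs stay contiguous. A simplified standalone model of that loop, assuming a flat entry array and ignoring the huge/PMD sizing checks:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SZ_4K 0x1000u

typedef uint32_t pte_basic_t;

/* Mirror one logical PTE across the num 4k hardware slots it covers. */
static pte_basic_t pte_update_model(pte_basic_t *entry, size_t num,
                                    pte_basic_t clr, pte_basic_t set)
{
    pte_basic_t old = entry[0];
    pte_basic_t new = (old & ~clr) | set;

    for (size_t i = 0; i < num; i++, new += SZ_4K)
        entry[i] = new;
    return old;
}

int main(void)
{
    pte_basic_t pte[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

    /* A 16k page (CONFIG_PPC_16K_PAGES) covers PAGE_SIZE / SZ_4K = 4 slots. */
    pte_update_model(pte, 4, 0, 0x1);
    assert(pte[0] == 0x1001 && pte[3] == 0x4001);
    return 0;
}
]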
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 12c6811e344b..2d3153cfc0d7 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -44,9 +44,8 @@
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
#define _PAGE_SPECIAL 0x020 /* software: Special page */
-#define _PAGE_RW 0x040 /* software: Writes permitted */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */
#define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
@@ -58,8 +57,8 @@
#define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
#define _PMD_PRESENT_MASK _PMD_PRESENT
@@ -85,21 +84,5 @@
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#ifndef __ASSEMBLY__
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE));
-}
-
-#define pte_wrprotect pte_wrprotect
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
-}
-
-#define pte_mkclean pte_mkclean
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index c9e4b2d90f65..66f403a7da44 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -46,6 +46,8 @@
#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
+#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
+
/* cache related flags non existing on 8xx */
#define _PAGE_COHERENT 0
#define _PAGE_WRITETHRU 0
@@ -128,7 +130,7 @@ static inline pte_t pte_mkuser(pte_t pte)
static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPS);
+ return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
}
#define pte_mkhuge pte_mkhuge
diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
deleted file mode 100644
index 39eb0154ae2d..000000000000
--- a/arch/powerpc/include/asm/nohash/32/slice.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
-#define _ASM_POWERPC_NOHASH_32_SLICE_H
-
-#ifdef CONFIG_PPC_MM_SLICES
-
-#define SLICE_LOW_SHIFT 26 /* 64 slices */
-#define SLICE_LOW_TOP (0x100000000ull)
-#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
-#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
-
-#define SLICE_HIGH_SHIFT 0
-#define SLICE_NUM_HIGH 0ul
-#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
-
-#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW
-
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index b360f262b9c6..3424381b81da 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -211,22 +211,9 @@ static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long set,
int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%3 # pte_update\n\
- andc %1,%0,%4 \n\
- or %1,%1,%6\n\
- stdcx. %1,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "r" (set)
- : "cc" );
-#else
unsigned long old = pte_val(*ptep);
*ptep = __pte((old & ~clr) | set);
-#endif
+
/* huge pages use the old page table lock */
if (!huge)
assert_pte_locked(mm, addr);
@@ -310,21 +297,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%4\n\
- or %0,%3,%0\n\
- stdcx. %0,0,%4\n\
- bne- 1b"
- :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
- :"r" (bits), "r" (ptep), "m" (*ptep)
- :"cc");
-#else
unsigned long old = pte_val(*ptep);
*ptep = __pte(old | bits);
-#endif
flush_tlb_page(vma, address);
}
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 7fed9dc0f147..50a4b0bb8d16 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -130,12 +130,10 @@ static inline pte_t pte_exprotect(pte_t pte)
return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
-#ifndef pte_mkclean
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
-#endif
static inline pte_t pte_mkold(pte_t pte)
{
@@ -267,7 +265,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd_val(hpd) & 0x4) != 0);
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
/* We clear the top bit to indicate hugepd */
return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);