Diffstat (limited to 'arch/powerpc/include')
69 files changed, 1480 insertions, 577 deletions
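Several hunks below (asm/atomic.h, asm/io.h) change inline-asm memory operands from a plain "m" constraint to "m"UPD_CONSTR, so GCC may use update-form addressing while compilers that reject the "<>" constraint still build. A minimal sketch of that pattern, assuming UPD_CONSTR comes from asm/asm-const.h and expands to "<>" where the compiler supports it and to an empty string otherwise; read_u32() is a made-up name for illustration, not part of the patch:

#include <linux/types.h>
#include <asm/asm-const.h>	/* assumption: provides UPD_CONSTR ("<>" or "") */

static inline u32 read_u32(const u32 *p)
{
	u32 t;

	/*
	 * %U1 appends "u" (update form) and %X1 appends "x" (indexed form)
	 * to the mnemonic when the compiler picks such an address mode;
	 * the "<>" part of the constraint is what permits those modes.
	 */
	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r" (t) : "m"UPD_CONSTR (*p));
	return t;
}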
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 8a55eb8cc97b..61c6e8b200e8 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -10,6 +10,7 @@ #include <linux/types.h> #include <asm/cmpxchg.h> #include <asm/barrier.h> +#include <asm/asm-const.h> /* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with @@ -26,14 +27,14 @@ static __inline__ int atomic_read(const atomic_t *v) { int t; - __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter)); return t; } static __inline__ void atomic_set(atomic_t *v, int i) { - __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); + __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); } #define ATOMIC_OP(op, asm_op) \ @@ -316,14 +317,14 @@ static __inline__ s64 atomic64_read(const atomic64_t *v) { s64 t; - __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter)); + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter)); return t; } static __inline__ void atomic64_set(atomic64_t *v, s64 i) { - __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); + __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); } #define ATOMIC64_OP(op, asm_op) \ diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index f53c42380832..aecfde829d5d 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -40,7 +40,7 @@ #define wmb() __asm__ __volatile__ ("sync" : : : "memory") /* The sub-arch has lwsync */ -#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) +#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_E500MC) # define SMPWMB LWSYNC #else # define SMPWMB eieio diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 4a4d3afd5340..299ab33505a6 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -216,15 +216,34 @@ static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr) */ static inline int fls(unsigned int x) { - return 32 - __builtin_clz(x); + int lz; + + if (__builtin_constant_p(x)) + return x ? 32 - __builtin_clz(x) : 0; + asm("cntlzw %0,%1" : "=r" (lz) : "r" (x)); + return 32 - lz; } #include <asm-generic/bitops/builtin-__fls.h> +/* + * 64-bit can do this using one cntlzd (count leading zeroes doubleword) + * instruction; for 32-bit we use the generic version, which does two + * 32-bit fls calls. + */ +#ifdef CONFIG_PPC64 static inline int fls64(__u64 x) { - return 64 - __builtin_clzll(x); + int lz; + + if (__builtin_constant_p(x)) + return x ? 
64 - __builtin_clzll(x) : 0; + asm("cntlzd %0,%1" : "=r" (lz) : "r" (x)); + return 64 - lz; } +#else +#include <asm-generic/bitops/fls64.h> +#endif #ifdef CONFIG_PPC64 unsigned int __arch_hweight8(unsigned int w); diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 32fd4452e960..a0117a9d5b06 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -183,11 +183,7 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) unsigned long begin = regs->kuap & 0xf0000000; unsigned long end = regs->kuap << 28; - if (!is_write) - return false; - - return WARN(address < begin || address >= end, - "Bug: write fault blocked by segment registers !"); + return is_write && (address < begin || address >= end); } #endif /* CONFIG_PPC_KUAP */ diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h index 2e277ca0170f..685c589e723f 100644 --- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h @@ -90,10 +90,11 @@ struct hash_pte { typedef struct { unsigned long id; - unsigned long vdso_base; + void __user *vdso; } mm_context_t; void update_bats(void); +static inline void cleanup_cpu_mmu_context(void) { }; /* patch sites */ extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2; diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 1376be95e975..415ae29fa73a 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -240,8 +240,14 @@ extern void add_hash_page(unsigned context, unsigned long va, unsigned long pmdval); /* Flush an entry from the TLB/hash table */ -extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, - unsigned long address); +static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) +{ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) { + unsigned long ptephys = __pa(ptep) & PAGE_MASK; + + flush_hash_pages(mm->context.id, addr, ptephys, 1); + } +} /* * PTE updates. 
This function is called whenever an existing @@ -293,10 +299,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, { unsigned long old; old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); - if (old & _PAGE_HASHPTE) { - unsigned long ptephys = __pa(ptep) & PAGE_MASK; - flush_hash_pages(mm->context.id, addr, ptephys, 1); - } + if (old & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); + return (old & _PAGE_ACCESSED) != 0; } #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ @@ -524,9 +529,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, if (pte_val(*ptep) & _PAGE_HASHPTE) flush_hash_entry(mm, ptep, addr); __asm__ __volatile__("\ - stw%U0%X0 %2,%0\n\ + stw%X0 %2,%0\n\ eieio\n\ - stw%U0%X0 %L2,%1" + stw%X1 %L2,%1" : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h index 068085b709fb..d941c06d4f2e 100644 --- a/arch/powerpc/include/asm/book3s/32/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h @@ -6,12 +6,69 @@ /* * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx */ -extern void flush_tlb_mm(struct mm_struct *mm); -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end); -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +void hash__flush_tlb_mm(struct mm_struct *mm); +void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end); + +#ifdef CONFIG_SMP +void _tlbie(unsigned long address); +#else +static inline void _tlbie(unsigned long address) +{ + asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); +} +#endif +void _tlbia(void); + +/* + * Called at the end of a mmu_gather operation to make sure the + * TLB flush is completely done. + */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + /* 603 needs to flush the whole TLB here since it doesn't use a hash table. 
*/ + if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) + _tlbia(); +} + +static inline void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end) +{ + start &= PAGE_MASK; + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + hash__flush_range(mm, start, end); + else if (end - start <= PAGE_SIZE) + _tlbie(start); + else + _tlbia(); +} + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + hash__flush_tlb_mm(mm); + else + _tlbia(); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) +{ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + hash__flush_tlb_page(vma, vmaddr); + else + _tlbie(vmaddr); +} + +static inline void +flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + flush_range(vma->vm_mm, start, end); +} + +static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + flush_range(&init_mm, start, end); +} + static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { diff --git a/arch/powerpc/include/asm/book3s/64/hash-pkey.h b/arch/powerpc/include/asm/book3s/64/hash-pkey.h index 795010897e5d..f1e60d579f6c 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-pkey.h +++ b/arch/powerpc/include/asm/book3s/64/hash-pkey.h @@ -2,6 +2,9 @@ #ifndef _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H #define _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H +/* We use key 3 for KERNEL */ +#define HASH_DEFAULT_KERNEL_KEY (HPTE_R_KEY_BIT0 | HPTE_R_KEY_BIT1) + static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags) { return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) | @@ -11,13 +14,23 @@ static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags) ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL)); } -static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) +static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags) { - return (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) | - ((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL)); + unsigned long pte_pkey; + + pte_pkey = (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) | + ((pteflags & H_PTE_PKEY_BIT0) ? 
HPTE_R_KEY_BIT0 : 0x0UL)); + + if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) || + mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) { + if ((pte_pkey == 0) && (flags & HPTE_USE_KERNEL_KEY)) + return HASH_DEFAULT_KERNEL_KEY; + } + + return pte_pkey; } static inline u16 hash__pte_to_pkey_bits(u64 pteflags) diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 73ad038ed10b..d959b0195ad9 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -145,7 +145,7 @@ extern void hash__mark_initmem_nx(void); extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pte, int huge); -extern unsigned long htab_convert_pte_flags(unsigned long pteflags); +unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags); /* Atomic PTE updates */ static inline unsigned long hash__pte_update(struct mm_struct *mm, unsigned long addr, diff --git a/arch/powerpc/include/asm/book3s/64/kexec.h b/arch/powerpc/include/asm/book3s/64/kexec.h index 6b5c3a248ba2..d4b9d476ecba 100644 --- a/arch/powerpc/include/asm/book3s/64/kexec.h +++ b/arch/powerpc/include/asm/book3s/64/kexec.h @@ -3,6 +3,7 @@ #ifndef _ASM_POWERPC_BOOK3S_64_KEXEC_H_ #define _ASM_POWERPC_BOOK3S_64_KEXEC_H_ +#include <asm/plpar_wrappers.h> #define reset_sprs reset_sprs static inline void reset_sprs(void) @@ -14,6 +15,10 @@ static inline void reset_sprs(void) if (cpu_has_feature(CPU_FTR_ARCH_207S)) { mtspr(SPRN_IAMR, 0); + if (cpu_has_feature(CPU_FTR_HVMODE)) + mtspr(SPRN_CIABR, 0); + else + plpar_set_ciabr(0); } /* Do we need isync()? We are going via a kexec reset */ diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h deleted file mode 100644 index a39e2d193fdc..000000000000 --- a/arch/powerpc/include/asm/book3s/64/kup-radix.h +++ /dev/null @@ -1,205 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H -#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H - -#include <linux/const.h> -#include <asm/reg.h> - -#define AMR_KUAP_BLOCK_READ UL(0x4000000000000000) -#define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000) -#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE) -#define AMR_KUAP_SHIFT 62 - -#ifdef __ASSEMBLY__ - -.macro kuap_restore_amr gpr1, gpr2 -#ifdef CONFIG_PPC_KUAP - BEGIN_MMU_FTR_SECTION_NESTED(67) - mfspr \gpr1, SPRN_AMR - ld \gpr2, STACK_REGS_KUAP(r1) - cmpd \gpr1, \gpr2 - beq 998f - isync - mtspr SPRN_AMR, \gpr2 - /* No isync required, see kuap_restore_amr() */ -998: - END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) -#endif -.endm - -#ifdef CONFIG_PPC_KUAP -.macro kuap_check_amr gpr1, gpr2 -#ifdef CONFIG_PPC_KUAP_DEBUG - BEGIN_MMU_FTR_SECTION_NESTED(67) - mfspr \gpr1, SPRN_AMR - li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT) - sldi \gpr2, \gpr2, AMR_KUAP_SHIFT -999: tdne \gpr1, \gpr2 - EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) - END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) -#endif -.endm -#endif - -.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr -#ifdef CONFIG_PPC_KUAP - BEGIN_MMU_FTR_SECTION_NESTED(67) - .ifnb \msr_pr_cr - bne \msr_pr_cr, 99f - .endif - mfspr \gpr1, SPRN_AMR - std \gpr1, STACK_REGS_KUAP(r1) - li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT) - sldi \gpr2, \gpr2, AMR_KUAP_SHIFT - cmpd \use_cr, \gpr1, \gpr2 - beq \use_cr, 99f - // We don't isync here because we very recently entered via rfid - mtspr SPRN_AMR, \gpr2 - isync -99: - 
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) -#endif -.endm - -#else /* !__ASSEMBLY__ */ - -#include <linux/jump_label.h> - -DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); - -#ifdef CONFIG_PPC_KUAP - -#include <asm/mmu.h> -#include <asm/ptrace.h> - -static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) -{ - if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) { - isync(); - mtspr(SPRN_AMR, regs->kuap); - /* - * No isync required here because we are about to RFI back to - * previous context before any user accesses would be made, - * which is a CSI. - */ - } -} - -static inline unsigned long kuap_get_and_check_amr(void) -{ - if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) { - unsigned long amr = mfspr(SPRN_AMR); - if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */ - WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED); - return amr; - } - return 0; -} - -static inline void kuap_check_amr(void) -{ - if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP)) - WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED); -} - -/* - * We support individually allowing read or write, but we don't support nesting - * because that would require an expensive read/modify write of the AMR. - */ - -static inline unsigned long get_kuap(void) -{ - /* - * We return AMR_KUAP_BLOCKED when we don't support KUAP because - * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to - * cause restore_user_access to do a flush. - * - * This has no effect in terms of actually blocking things on hash, - * so it doesn't break anything. - */ - if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP)) - return AMR_KUAP_BLOCKED; - - return mfspr(SPRN_AMR); -} - -static inline void set_kuap(unsigned long value) -{ - if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP)) - return; - - /* - * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both - * before and after the move to AMR. See table 6 on page 1134. - */ - isync(); - mtspr(SPRN_AMR, value); - isync(); -} - -static inline bool -bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) -{ - return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && - (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), - "Bug: %s fault blocked by AMR!", is_write ? 
"Write" : "Read"); -} -#else /* CONFIG_PPC_KUAP */ -static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { } - -static inline unsigned long kuap_get_and_check_amr(void) -{ - return 0UL; -} - -static inline unsigned long get_kuap(void) -{ - return AMR_KUAP_BLOCKED; -} - -static inline void set_kuap(unsigned long value) { } -#endif /* !CONFIG_PPC_KUAP */ - -static __always_inline void allow_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) -{ - // This is written so we can resolve to a single case at build time - BUILD_BUG_ON(!__builtin_constant_p(dir)); - if (dir == KUAP_READ) - set_kuap(AMR_KUAP_BLOCK_WRITE); - else if (dir == KUAP_WRITE) - set_kuap(AMR_KUAP_BLOCK_READ); - else if (dir == KUAP_READ_WRITE) - set_kuap(0); - else - BUILD_BUG(); -} - -static inline void prevent_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) -{ - set_kuap(AMR_KUAP_BLOCKED); - if (static_branch_unlikely(&uaccess_flush_key)) - do_uaccess_flush(); -} - -static inline unsigned long prevent_user_access_return(void) -{ - unsigned long flags = get_kuap(); - - set_kuap(AMR_KUAP_BLOCKED); - if (static_branch_unlikely(&uaccess_flush_key)) - do_uaccess_flush(); - - return flags; -} - -static inline void restore_user_access(unsigned long flags) -{ - set_kuap(flags); - if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED) - do_uaccess_flush(); -} -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */ diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h new file mode 100644 index 000000000000..f50f72e535aa --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/kup.h @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H +#define _ASM_POWERPC_BOOK3S_64_KUP_H + +#include <linux/const.h> +#include <asm/reg.h> + +#define AMR_KUAP_BLOCK_READ UL(0x5455555555555555) +#define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa) +#define AMR_KUEP_BLOCKED UL(0x5455555555555555) +#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE) + +#ifdef __ASSEMBLY__ + +.macro kuap_user_restore gpr1, gpr2 +#if defined(CONFIG_PPC_PKEY) + BEGIN_MMU_FTR_SECTION_NESTED(67) + b 100f // skip_restore_amr + END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67) + /* + * AMR and IAMR are going to be different when + * returning to userspace. + */ + ld \gpr1, STACK_REGS_AMR(r1) + + /* + * If kuap feature is not enabled, do the mtspr + * only if AMR value is different. + */ + BEGIN_MMU_FTR_SECTION_NESTED(68) + mfspr \gpr2, SPRN_AMR + cmpd \gpr1, \gpr2 + beq 99f + END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68) + + isync + mtspr SPRN_AMR, \gpr1 +99: + /* + * Restore IAMR only when returning to userspace + */ + ld \gpr1, STACK_REGS_IAMR(r1) + + /* + * If kuep feature is not enabled, do the mtspr + * only if IAMR value is different. + */ + BEGIN_MMU_FTR_SECTION_NESTED(69) + mfspr \gpr2, SPRN_IAMR + cmpd \gpr1, \gpr2 + beq 100f + END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69) + + isync + mtspr SPRN_IAMR, \gpr1 + +100: //skip_restore_amr + /* No isync required, see kuap_user_restore() */ +#endif +.endm + +.macro kuap_kernel_restore gpr1, gpr2 +#if defined(CONFIG_PPC_PKEY) + + BEGIN_MMU_FTR_SECTION_NESTED(67) + /* + * AMR is going to be mostly the same since we are + * returning to the kernel. Compare and do a mtspr. 
+ */ + ld \gpr2, STACK_REGS_AMR(r1) + mfspr \gpr1, SPRN_AMR + cmpd \gpr1, \gpr2 + beq 100f + isync + mtspr SPRN_AMR, \gpr2 + /* + * No isync required, see kuap_restore_amr() + * No need to restore IAMR when returning to kernel space. + */ +100: + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67) +#endif +.endm + +#ifdef CONFIG_PPC_KUAP +.macro kuap_check_amr gpr1, gpr2 +#ifdef CONFIG_PPC_KUAP_DEBUG + BEGIN_MMU_FTR_SECTION_NESTED(67) + mfspr \gpr1, SPRN_AMR + /* Prevent access to userspace using any key values */ + LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED) +999: tdne \gpr1, \gpr2 + EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67) +#endif +.endm +#endif + +/* + * if (pkey) { + * + * save AMR -> stack; + * if (kuap) { + * if (AMR != BLOCKED) + * KUAP_BLOCKED -> AMR; + * } + * if (from_user) { + * save IAMR -> stack; + * if (kuep) { + * KUEP_BLOCKED ->IAMR + * } + * } + * return; + * } + * + * if (kuap) { + * if (from_kernel) { + * save AMR -> stack; + * if (AMR != BLOCKED) + * KUAP_BLOCKED -> AMR; + * } + * + * } + */ +.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr +#if defined(CONFIG_PPC_PKEY) + + /* + * if both pkey and kuap is disabled, nothing to do + */ + BEGIN_MMU_FTR_SECTION_NESTED(68) + b 100f // skip_save_amr + END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68) + + /* + * if pkey is disabled and we are entering from userspace + * don't do anything. + */ + BEGIN_MMU_FTR_SECTION_NESTED(67) + .ifnb \msr_pr_cr + /* + * Without pkey we are not changing AMR outside the kernel + * hence skip this completely. + */ + bne \msr_pr_cr, 100f // from userspace + .endif + END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67) + + /* + * pkey is enabled or pkey is disabled but entering from kernel + */ + mfspr \gpr1, SPRN_AMR + std \gpr1, STACK_REGS_AMR(r1) + + /* + * update kernel AMR with AMR_KUAP_BLOCKED only + * if KUAP feature is enabled + */ + BEGIN_MMU_FTR_SECTION_NESTED(69) + LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED) + cmpd \use_cr, \gpr1, \gpr2 + beq \use_cr, 102f + /* + * We don't isync here because we very recently entered via an interrupt + */ + mtspr SPRN_AMR, \gpr2 + isync +102: + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69) + + /* + * if entering from kernel we don't need save IAMR + */ + .ifnb \msr_pr_cr + beq \msr_pr_cr, 100f // from kernel space + mfspr \gpr1, SPRN_IAMR + std \gpr1, STACK_REGS_IAMR(r1) + + /* + * update kernel IAMR with AMR_KUEP_BLOCKED only + * if KUEP feature is enabled + */ + BEGIN_MMU_FTR_SECTION_NESTED(70) + LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED) + mtspr SPRN_IAMR, \gpr2 + isync + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70) + .endif + +100: // skip_save_amr +#endif +.endm + +#else /* !__ASSEMBLY__ */ + +#include <linux/jump_label.h> + +DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); + +#ifdef CONFIG_PPC_PKEY + +#include <asm/mmu.h> +#include <asm/ptrace.h> + +/* + * For kernel thread that doesn't have thread.regs return + * default AMR/IAMR values. 
+ */ +static inline u64 current_thread_amr(void) +{ + if (current->thread.regs) + return current->thread.regs->amr; + return AMR_KUAP_BLOCKED; +} + +static inline u64 current_thread_iamr(void) +{ + if (current->thread.regs) + return current->thread.regs->iamr; + return AMR_KUEP_BLOCKED; +} +#endif /* CONFIG_PPC_PKEY */ + +#ifdef CONFIG_PPC_KUAP + +static inline void kuap_user_restore(struct pt_regs *regs) +{ + bool restore_amr = false, restore_iamr = false; + unsigned long amr, iamr; + + if (!mmu_has_feature(MMU_FTR_PKEY)) + return; + + if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) { + amr = mfspr(SPRN_AMR); + if (amr != regs->amr) + restore_amr = true; + } else { + restore_amr = true; + } + + if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) { + iamr = mfspr(SPRN_IAMR); + if (iamr != regs->iamr) + restore_iamr = true; + } else { + restore_iamr = true; + } + + + if (restore_amr || restore_iamr) { + isync(); + if (restore_amr) + mtspr(SPRN_AMR, regs->amr); + if (restore_iamr) + mtspr(SPRN_IAMR, regs->iamr); + } + /* + * No isync required here because we are about to rfi + * back to previous context before any user accesses + * would be made, which is a CSI. + */ +} + +static inline void kuap_kernel_restore(struct pt_regs *regs, + unsigned long amr) +{ + if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) { + if (unlikely(regs->amr != amr)) { + isync(); + mtspr(SPRN_AMR, regs->amr); + /* + * No isync required here because we are about to rfi + * back to previous context before any user accesses + * would be made, which is a CSI. + */ + } + } + /* + * No need to restore IAMR when returning to kernel space. + */ +} + +static inline unsigned long kuap_get_and_check_amr(void) +{ + if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) { + unsigned long amr = mfspr(SPRN_AMR); + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */ + WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED); + return amr; + } + return 0; +} + +#else /* CONFIG_PPC_PKEY */ + +static inline void kuap_user_restore(struct pt_regs *regs) +{ +} + +static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) +{ +} + +static inline unsigned long kuap_get_and_check_amr(void) +{ + return 0; +} + +#endif /* CONFIG_PPC_PKEY */ + + +#ifdef CONFIG_PPC_KUAP + +static inline void kuap_check_amr(void) +{ + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED); +} + +/* + * We support individually allowing read or write, but we don't support nesting + * because that would require an expensive read/modify write of the AMR. + */ + +static inline unsigned long get_kuap(void) +{ + /* + * We return AMR_KUAP_BLOCKED when we don't support KUAP because + * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to + * cause restore_user_access to do a flush. + * + * This has no effect in terms of actually blocking things on hash, + * so it doesn't break anything. + */ + if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + return AMR_KUAP_BLOCKED; + + return mfspr(SPRN_AMR); +} + +static inline void set_kuap(unsigned long value) +{ + if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + return; + + /* + * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both + * before and after the move to AMR. See table 6 on page 1134. 
+ */ + isync(); + mtspr(SPRN_AMR, value); + isync(); +} + +static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address, + bool is_write) +{ + if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) + return false; + /* + * For radix this will be a storage protection fault (DSISR_PROTFAULT). + * For hash this will be a key fault (DSISR_KEYFAULT) + */ + /* + * We do have exception table entry, but accessing the + * userspace results in fault. This could be because we + * didn't unlock the AMR or access is denied by userspace + * using a key value that blocks access. We are only interested + * in catching the use case of accessing without unlocking + * the AMR. Hence check for BLOCK_WRITE/READ against AMR. + */ + if (is_write) { + return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE; + } + return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ; +} + +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) +{ + unsigned long thread_amr = 0; + + // This is written so we can resolve to a single case at build time + BUILD_BUG_ON(!__builtin_constant_p(dir)); + + if (mmu_has_feature(MMU_FTR_PKEY)) + thread_amr = current_thread_amr(); + + if (dir == KUAP_READ) + set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE); + else if (dir == KUAP_WRITE) + set_kuap(thread_amr | AMR_KUAP_BLOCK_READ); + else if (dir == KUAP_READ_WRITE) + set_kuap(thread_amr); + else + BUILD_BUG(); +} + +#else /* CONFIG_PPC_KUAP */ + +static inline unsigned long get_kuap(void) +{ + return AMR_KUAP_BLOCKED; +} + +static inline void set_kuap(unsigned long value) { } + +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) +{ } + +#endif /* !CONFIG_PPC_KUAP */ + +static inline void prevent_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) +{ + set_kuap(AMR_KUAP_BLOCKED); + if (static_branch_unlikely(&uaccess_flush_key)) + do_uaccess_flush(); +} + +static inline unsigned long prevent_user_access_return(void) +{ + unsigned long flags = get_kuap(); + + set_kuap(AMR_KUAP_BLOCKED); + if (static_branch_unlikely(&uaccess_flush_key)) + do_uaccess_flush(); + + return flags; +} + +static inline void restore_user_access(unsigned long flags) +{ + set_kuap(flags); + if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED) + do_uaccess_flush(); +} +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */ diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 683a9c7d1b03..066b1d34c7bc 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -452,6 +452,7 @@ static inline unsigned long hpt_hash(unsigned long vpn, #define HPTE_LOCAL_UPDATE 0x1 #define HPTE_NOHPTE_UPDATE 0x2 +#define HPTE_USE_KERNEL_KEY 0x4 extern int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, @@ -842,6 +843,32 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) unsigned htab_shift_for_mem_size(unsigned long mem_size); -#endif /* __ASSEMBLY__ */ +enum slb_index { + LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */ + KSTACK_INDEX = 1, /* Kernel stack map */ +}; +#define slb_esid_mask(ssize) \ + (((ssize) == MMU_SEGSIZE_256M) ? 
ESID_MASK : ESID_MASK_1T) + +static inline unsigned long mk_esid_data(unsigned long ea, int ssize, + enum slb_index index) +{ + return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; +} + +static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize, + unsigned long flags) +{ + return (vsid << slb_vsid_shift(ssize)) | flags | + ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT); +} + +static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, + unsigned long flags) +{ + return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); +} + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */ diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 750918451dd2..995bbcdd0ef8 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -111,7 +111,7 @@ typedef struct { struct hash_mm_context *hash_context; - unsigned long vdso_base; + void __user *vdso; /* * pagetable fragment support */ @@ -199,7 +199,7 @@ extern int mmu_io_psize; void mmu_early_init_devtree(void); void hash__early_init_devtree(void); void radix__early_init_devtree(void); -#ifdef CONFIG_PPC_MEM_KEYS +#ifdef CONFIG_PPC_PKEY void pkey_early_init_devtree(void); #else static inline void pkey_early_init_devtree(void) {} diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index cd3feeac6e87..a39886681629 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1231,13 +1231,28 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) return hash__pmd_same(pmd_a, pmd_b); } -static inline pmd_t pmd_mkhuge(pmd_t pmd) +static inline pmd_t __pmd_mkhuge(pmd_t pmd) { if (radix_enabled()) return radix__pmd_mkhuge(pmd); return hash__pmd_mkhuge(pmd); } +/* + * pfn_pmd return a pmd_t that can be used as pmd pte entry. 
+ */ +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ +#ifdef CONFIG_DEBUG_VM + if (radix_enabled()) + WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)) == 0); + else + WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE)) != + cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE)); +#endif + return pmd; +} + #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h index b7d9f4267bcd..3b8640498f5b 100644 --- a/arch/powerpc/include/asm/book3s/64/pkeys.h +++ b/arch/powerpc/include/asm/book3s/64/pkeys.h @@ -6,6 +6,8 @@ #include <asm/book3s/64/hash-pkey.h> extern u64 __ro_after_init default_uamor; +extern u64 __ro_after_init default_amr; +extern u64 __ro_after_init default_iamr; static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) { diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 338f36cd9934..464f8ca8a5c9 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -12,7 +12,7 @@ #ifdef CONFIG_DEBUG_BUGVERBOSE .macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"aw" -5001: PPC_LONG \addr, 5002f +5001: .4byte \addr - 5001b, 5002f - 5001b .short \line, \flags .org 5001b+BUG_ENTRY_SIZE .previous @@ -23,7 +23,7 @@ #else .macro EMIT_BUG_ENTRY addr,file,line,flags .section __bug_table,"aw" -5001: PPC_LONG \addr +5001: .4byte \addr - 5001b .short \flags .org 5001b+BUG_ENTRY_SIZE .previous @@ -36,14 +36,14 @@ #ifdef CONFIG_DEBUG_BUGVERBOSE #define _EMIT_BUG_ENTRY \ ".section __bug_table,\"aw\"\n" \ - "2:\t" PPC_LONG "1b, %0\n" \ + "2:\t.4byte 1b - 2b, %0 - 2b\n" \ "\t.short %1, %2\n" \ ".org 2b+%3\n" \ ".previous\n" #else #define _EMIT_BUG_ENTRY \ ".section __bug_table,\"aw\"\n" \ - "2:\t" PPC_LONG "1b\n" \ + "2:\t.4byte 1b - 2b\n" \ "\t.short %2\n" \ ".org 2b+%3\n" \ ".previous\n" @@ -113,6 +113,7 @@ struct pt_regs; extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); extern void bad_page_fault(struct pt_regs *, unsigned long, int); +void __bad_page_fault(struct pt_regs *regs, unsigned long address, int sig); extern void _exception(int, struct pt_regs *, int, unsigned long); extern void _exception_pkey(struct pt_regs *, unsigned long, int); extern void die(const char *, struct pt_regs *, long); diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 82f099ba2411..d5da7ddbf0fc 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -163,7 +163,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) */ __wsum __csum_partial(const void *buff, int len, __wsum sum); -static inline __wsum csum_partial(const void *buff, int len, __wsum sum) +static __always_inline __wsum csum_partial(const void *buff, int len, __wsum sum) { if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) { if (len == 2) diff --git a/arch/powerpc/include/asm/clocksource.h b/arch/powerpc/include/asm/clocksource.h new file mode 100644 index 000000000000..0a26ef13a34a --- /dev/null +++ b/arch/powerpc/include/asm/clocksource.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_CLOCKSOURCE_H +#define _ASM_POWERPC_CLOCKSOURCE_H + +#include <asm/vdso/clocksource.h> + +#endif /* _ASM_POWERPC_CLOCKSOURCE_H */ diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index a116fe931789..3bdd74739cb8 100644 --- 
a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h @@ -68,6 +68,7 @@ extern void cpm_reset(void); #define PROFF_SPI ((uint)0x0180) #define PROFF_SCC3 ((uint)0x0200) #define PROFF_SMC1 ((uint)0x0280) +#define PROFF_DSP1 ((uint)0x02c0) #define PROFF_SCC4 ((uint)0x0300) #define PROFF_SMC2 ((uint)0x0380) diff --git a/arch/powerpc/include/asm/cpu_setup_power.h b/arch/powerpc/include/asm/cpu_setup_power.h new file mode 100644 index 000000000000..24be9131f803 --- /dev/null +++ b/arch/powerpc/include/asm/cpu_setup_power.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 IBM Corporation + */ +void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power7(void); +void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power8(void); +void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power9(void); +void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power10(void); diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 3d2f94afc13a..5f21a5bab467 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -41,7 +41,6 @@ extern int machine_check_4xx(struct pt_regs *regs); extern int machine_check_440A(struct pt_regs *regs); extern int machine_check_e500mc(struct pt_regs *regs); extern int machine_check_e500(struct pt_regs *regs); -extern int machine_check_e200(struct pt_regs *regs); extern int machine_check_47x(struct pt_regs *regs); int machine_check_8xx(struct pt_regs *regs); int machine_check_83xx(struct pt_regs *regs); @@ -137,7 +136,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_DBELL ASM_CONST(0x00000004) #define CPU_FTR_CAN_NAP ASM_CONST(0x00000008) #define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00000010) -#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00000020) +// ASM_CONST(0x00000020) Free #define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00000040) #define CPU_FTR_LWSYNC ASM_CONST(0x00000080) #define CPU_FTR_NOEXECUTE ASM_CONST(0x00000100) @@ -219,9 +218,7 @@ static inline void cpu_feature_keys_init(void) { } #ifndef __ASSEMBLY__ -#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN) - -#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE) +#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE) /* We only set the altivec features if the kernel was compiled with altivec * support @@ -369,7 +366,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX) #define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_MAYBE_CAN_NAP) + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NOEXECUTE) #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_COMMON | CPU_FTR_NOEXECUTE) @@ -378,38 +375,33 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON) #define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE) -#define CPU_FTRS_40X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_44X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_440x6 (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \ +#define CPU_FTRS_40X (CPU_FTR_NOEXECUTE) +#define CPU_FTRS_44X (CPU_FTR_NOEXECUTE) +#define CPU_FTRS_440x6 (CPU_FTR_NOEXECUTE | \ CPU_FTR_INDEXED_DCR) 
#define CPU_FTRS_47X (CPU_FTRS_440x6) -#define CPU_FTRS_E200 (CPU_FTR_SPE_COMP | \ - CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ - CPU_FTR_NOEXECUTE | \ - CPU_FTR_DEBUG_LVL_EXC) #define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_NOEXECUTE) #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | \ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ - CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_E500MC (CPU_FTR_NODSISRALIGN | \ + CPU_FTR_NOEXECUTE) +#define CPU_FTRS_E500MC ( \ CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV) /* * e5500/e6500 erratum A-006958 is a timebase bug that can use the * same workaround as CPU_FTR_CELL_TB_BUG. */ -#define CPU_FTRS_E5500 (CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E5500 ( \ CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG) -#define CPU_FTRS_E6500 (CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E6500 ( \ CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT) -#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ #define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \ @@ -489,7 +481,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) #define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2) -#ifdef __powerpc64__ +#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC_BOOK3E #define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500) #else @@ -510,18 +502,19 @@ static inline void cpu_feature_keys_init(void) { } #else enum { CPU_FTRS_POSSIBLE = -#ifdef CONFIG_PPC_BOOK3S_32 - CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU | +#ifdef CONFIG_PPC_BOOK3S_604 + CPU_FTRS_604 | CPU_FTRS_740_NOTAU | CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 | CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX | CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 | CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 | CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 | - CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX | - CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 | + CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_CLASSIC32 | -#else - CPU_FTRS_GENERIC_32 | +#endif +#ifdef CONFIG_PPC_BOOK3S_603 + CPU_FTRS_603 | CPU_FTRS_82XX | + CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 | #endif #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX | @@ -529,14 +522,10 @@ enum { #ifdef CONFIG_40x CPU_FTRS_40X | #endif -#ifdef CONFIG_44x - CPU_FTRS_44X | CPU_FTRS_440x6 | -#endif #ifdef CONFIG_PPC_47x CPU_FTRS_47X | CPU_FTR_476_DD2 | -#endif -#ifdef CONFIG_E200 - CPU_FTRS_E200 | +#elif defined(CONFIG_44x) + CPU_FTRS_44X | CPU_FTRS_440x6 | #endif #ifdef CONFIG_E500 CPU_FTRS_E500 | CPU_FTRS_E500_2 | @@ -548,7 +537,7 @@ enum { }; #endif /* __powerpc64__ */ -#ifdef __powerpc64__ +#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC_BOOK3E #define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500) #else @@ -557,7 +546,6 @@ enum { #define CPU_FTRS_DT_CPU_BASE \ (CPU_FTR_LWSYNC | \ CPU_FTR_FPU_UNAVAILABLE | \ - CPU_FTR_NODSISRALIGN | \ CPU_FTR_NOEXECUTE | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_STCX_CHECKS_ADDRESS | \ @@ -586,18 +574,19 @@ enum { #else enum { CPU_FTRS_ALWAYS = -#ifdef CONFIG_PPC_BOOK3S_32 - CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU & +#ifdef 
CONFIG_PPC_BOOK3S_604 + CPU_FTRS_604 & CPU_FTRS_740_NOTAU & CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 & CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX & CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 & CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 & CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 & - CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX & - CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 & + CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_CLASSIC32 & -#else - CPU_FTRS_GENERIC_32 & +#endif +#ifdef CONFIG_PPC_BOOK3S_603 + CPU_FTRS_603 & CPU_FTRS_82XX & + CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 & #endif #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX & @@ -605,12 +594,11 @@ enum { #ifdef CONFIG_40x CPU_FTRS_40X & #endif -#ifdef CONFIG_44x +#ifdef CONFIG_PPC_47x + CPU_FTRS_47X & +#elif defined(CONFIG_44x) CPU_FTRS_44X & CPU_FTRS_440x6 & #endif -#ifdef CONFIG_E200 - CPU_FTRS_E200 & -#endif #ifdef CONFIG_E500 CPU_FTRS_E500 & CPU_FTRS_E500_2 & #endif diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 53ed2ca40151..b8425e3cfd81 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -168,8 +168,8 @@ do { \ /* Cache size items */ \ NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ - NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ - VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \ + NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ + VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\ ARCH_DLINFO_CACHE_GEOMETRY; \ } while (0) diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index fbd406cd6916..f6d2acb57425 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -36,6 +36,24 @@ label##2: \ .align 2; \ label##3: + +#ifndef CONFIG_CC_IS_CLANG +#define CHECK_ALT_SIZE(else_size, body_size) \ + .ifgt (else_size) - (body_size); \ + .error "Feature section else case larger than body"; \ + .endif; +#else +/* + * If we use the ifgt syntax above, clang's assembler complains about the + * expression being non-absolute when the code appears in an inline assembly + * statement. + * As a workaround use an .org directive that has no effect if the else case + * instructions are smaller than the body, but fails otherwise. + */ +#define CHECK_ALT_SIZE(else_size, body_size) \ + .org . 
+ ((else_size) > (body_size)); +#endif + #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ label##4: \ .popsection; \ @@ -48,9 +66,7 @@ label##5: \ FTR_ENTRY_OFFSET label##2b-label##5b; \ FTR_ENTRY_OFFSET label##3b-label##5b; \ FTR_ENTRY_OFFSET label##4b-label##5b; \ - .ifgt (label##4b- label##3b)-(label##2b- label##1b); \ - .error "Feature section else case larger than body"; \ - .endif; \ + CHECK_ALT_SIZE((label##4b-label##3b), (label##2b-label##1b)); \ .popsection; @@ -100,6 +116,9 @@ label##5: \ #define END_MMU_FTR_SECTION_NESTED_IFSET(msk, label) \ END_MMU_FTR_SECTION_NESTED((msk), (msk), label) +#define END_MMU_FTR_SECTION_NESTED_IFCLR(msk, label) \ + END_MMU_FTR_SECTION_NESTED((msk), 0, label) + #define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk)) #define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0) diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 0b295bdb201e..aa6a5ef5d483 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -134,12 +134,6 @@ extern int ibm_nmi_interlock_token; extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; -#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST) -bool is_kvm_guest(void); -#else -static inline bool is_kvm_guest(void) { return false; } -#endif - #ifdef CONFIG_PPC_PSERIES void pseries_probe_fw_features(void); #else diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index c1fbccb04390..c98f5141e3fc 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -155,6 +155,14 @@ #define H_VASI_RESUMED 5 #define H_VASI_COMPLETED 6 +/* VASI signal codes. Only the Cancel code is valid for H_VASI_SIGNAL. */ +#define H_VASI_SIGNAL_CANCEL 1 +#define H_VASI_SIGNAL_ABORT 2 +#define H_VASI_SIGNAL_SUSPEND 3 +#define H_VASI_SIGNAL_COMPLETE 4 +#define H_VASI_SIGNAL_ENABLE 5 +#define H_VASI_SIGNAL_FAILOVER 6 + /* Each control block has to be on a 4K boundary */ #define H_CB_ALIGNMENT 4096 @@ -261,6 +269,7 @@ #define H_ADD_CONN 0x284 #define H_DEL_CONN 0x288 #define H_JOIN 0x298 +#define H_VASI_SIGNAL 0x2A0 #define H_VASI_STATE 0x2A4 #define H_VIOCTL 0x2A8 #define H_ENABLE_CRQ 0x2B0 diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 58635960403c..273edd208ec5 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \ { \ u##size ret; \ __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\ - : "=r" (ret) : "m" (*addr) : "memory"); \ + : "=r" (ret) : "m"UPD_CONSTR (*addr) : "memory"); \ return ret; \ } @@ -130,7 +130,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \ static inline void name(volatile u##size __iomem *addr, u##size val) \ { \ __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \ - : "=m" (*addr) : "r" (val) : "memory"); \ + : "=m"UPD_CONSTR (*addr) : "r" (val) : "memory"); \ mmiowb_set_pending(); \ } @@ -302,41 +302,56 @@ static inline unsigned char __raw_readb(const volatile void __iomem *addr) { return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr); } +#define __raw_readb __raw_readb + static inline unsigned short __raw_readw(const volatile void __iomem *addr) { return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr); } +#define __raw_readw __raw_readw + static inline unsigned int __raw_readl(const volatile void __iomem *addr) { return *(volatile unsigned int __force 
*)PCI_FIX_ADDR(addr); } +#define __raw_readl __raw_readl + static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr) { *(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v; } +#define __raw_writeb __raw_writeb + static inline void __raw_writew(unsigned short v, volatile void __iomem *addr) { *(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v; } +#define __raw_writew __raw_writew + static inline void __raw_writel(unsigned int v, volatile void __iomem *addr) { *(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v; } +#define __raw_writel __raw_writel #ifdef __powerpc64__ static inline unsigned long __raw_readq(const volatile void __iomem *addr) { return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr); } +#define __raw_readq __raw_readq + static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) { *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v; } +#define __raw_writeq __raw_writeq static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr) { __raw_writeq((__force unsigned long)cpu_to_be64(v), addr); } +#define __raw_writeq_be __raw_writeq_be /* * Real mode versions of the above. Those instructions are only supposed @@ -609,10 +624,37 @@ static inline void name at \ /* Some drivers check for the presence of readq & writeq with * a #ifdef, so we make them happy here. */ +#define readb readb +#define readw readw +#define readl readl +#define writeb writeb +#define writew writew +#define writel writel +#define readsb readsb +#define readsw readsw +#define readsl readsl +#define writesb writesb +#define writesw writesw +#define writesl writesl +#define inb inb +#define inw inw +#define inl inl +#define outb outb +#define outw outw +#define outl outl +#define insb insb +#define insw insw +#define insl insl +#define outsb outsb +#define outsw outsw +#define outsl outsl #ifdef __powerpc64__ #define readq readq #define writeq writeq #endif +#define memset_io memset_io +#define memcpy_fromio memcpy_fromio +#define memcpy_toio memcpy_toio /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem @@ -637,7 +679,106 @@ static inline void name at \ #define writel_relaxed(v, addr) writel(v, addr) #define writeq_relaxed(v, addr) writeq(v, addr) +#ifdef CONFIG_GENERIC_IOMAP #include <asm-generic/iomap.h> +#else +/* + * Here comes the implementation of the IOMAP interfaces. 
+ */ +static inline unsigned int ioread16be(const void __iomem *addr) +{ + return readw_be(addr); +} +#define ioread16be ioread16be + +static inline unsigned int ioread32be(const void __iomem *addr) +{ + return readl_be(addr); +} +#define ioread32be ioread32be + +#ifdef __powerpc64__ +static inline u64 ioread64_lo_hi(const void __iomem *addr) +{ + return readq(addr); +} +#define ioread64_lo_hi ioread64_lo_hi + +static inline u64 ioread64_hi_lo(const void __iomem *addr) +{ + return readq(addr); +} +#define ioread64_hi_lo ioread64_hi_lo + +static inline u64 ioread64be(const void __iomem *addr) +{ + return readq_be(addr); +} +#define ioread64be ioread64be + +static inline u64 ioread64be_lo_hi(const void __iomem *addr) +{ + return readq_be(addr); +} +#define ioread64be_lo_hi ioread64be_lo_hi + +static inline u64 ioread64be_hi_lo(const void __iomem *addr) +{ + return readq_be(addr); +} +#define ioread64be_hi_lo ioread64be_hi_lo +#endif /* __powerpc64__ */ + +static inline void iowrite16be(u16 val, void __iomem *addr) +{ + writew_be(val, addr); +} +#define iowrite16be iowrite16be + +static inline void iowrite32be(u32 val, void __iomem *addr) +{ + writel_be(val, addr); +} +#define iowrite32be iowrite32be + +#ifdef __powerpc64__ +static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + writeq(val, addr); +} +#define iowrite64_lo_hi iowrite64_lo_hi + +static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + writeq(val, addr); +} +#define iowrite64_hi_lo iowrite64_hi_lo + +static inline void iowrite64be(u64 val, void __iomem *addr) +{ + writeq_be(val, addr); +} +#define iowrite64be iowrite64be + +static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr) +{ + writeq_be(val, addr); +} +#define iowrite64be_lo_hi iowrite64be_lo_hi + +static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr) +{ + writeq_be(val, addr); +} +#define iowrite64be_hi_lo iowrite64be_hi_lo +#endif /* __powerpc64__ */ + +struct pci_dev; +void pci_iounmap(struct pci_dev *dev, void __iomem *addr); +#define pci_iounmap pci_iounmap +void __iomem *ioport_map(unsigned long port, unsigned int len); +#define ioport_map ioport_map +#endif static inline void iosync(void) { @@ -670,7 +811,6 @@ static inline void iosync(void) #define IO_SPACE_LIMIT ~(0UL) - /** * ioremap - map bus memory into CPU space * @address: bus address of the memory @@ -706,7 +846,13 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size); extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, unsigned long flags); extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); +#define ioremap_wc ioremap_wc + +#ifdef CONFIG_PPC32 void __iomem *ioremap_wt(phys_addr_t address, unsigned long size); +#define ioremap_wt ioremap_wt +#endif + void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); #define ioremap_uc(addr, size) ioremap((addr), (size)) #define ioremap_cache(addr, size) \ @@ -766,6 +912,7 @@ static inline unsigned long virt_to_phys(volatile void * address) return __pa((unsigned long)address); } +#define virt_to_phys virt_to_phys /** * phys_to_virt - map physical address to virtual @@ -783,6 +930,7 @@ static inline void * phys_to_virt(unsigned long address) { return (void *)__va(address); } +#define phys_to_virt phys_to_virt /* * Change "struct page" to physical address. 
@@ -810,6 +958,7 @@ static inline unsigned long virt_to_bus(volatile void * address) return 0; return __pa(address) + PCI_DRAM_OFFSET; } +#define virt_to_bus virt_to_bus static inline void * bus_to_virt(unsigned long address) { @@ -817,6 +966,7 @@ static inline void * bus_to_virt(unsigned long address) return NULL; return __va(address - PCI_DRAM_OFFSET); } +#define bus_to_virt bus_to_virt #define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET) @@ -855,6 +1005,8 @@ static inline void * bus_to_virt(unsigned long address) #define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set) +#include <asm-generic/io.h> + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_IO_H */ diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 0d93331d0fab..bf221a2a523e 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -15,11 +15,13 @@ #define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE) #ifdef CONFIG_PPC_BOOK3S_64 -#include <asm/book3s/64/kup-radix.h> +#include <asm/book3s/64/kup.h> #endif + #ifdef CONFIG_PPC_8xx #include <asm/nohash/32/kup-8xx.h> #endif + #ifdef CONFIG_PPC_BOOK3S_32 #include <asm/book3s/32/kup.h> #endif @@ -42,9 +44,10 @@ #else /* !__ASSEMBLY__ */ -#include <linux/pgtable.h> +extern bool disable_kuep; +extern bool disable_kuap; -void setup_kup(void); +#include <linux/pgtable.h> #ifdef CONFIG_PPC_KUEP void setup_kuep(bool disabled); @@ -80,6 +83,12 @@ static inline void restore_user_access(unsigned long flags) { } #endif /* CONFIG_PPC_BOOK3S_64 */ #endif /* CONFIG_PPC_KUAP */ +static __always_inline void setup_kup(void) +{ + setup_kuep(disable_kuep); + setup_kuap(disable_kuap); +} + static inline void allow_read_from_user(const void __user *from, unsigned long size) { allow_user_access(NULL, from, size, KUAP_READ); diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h new file mode 100644 index 000000000000..2fca299f7e19 --- /dev/null +++ b/arch/powerpc/include/asm/kvm_guest.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 IBM Corporation + */ + +#ifndef _ASM_POWERPC_KVM_GUEST_H_ +#define _ASM_POWERPC_KVM_GUEST_H_ + +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST) +#include <linux/jump_label.h> + +DECLARE_STATIC_KEY_FALSE(kvm_guest); + +static inline bool is_kvm_guest(void) +{ + return static_branch_unlikely(&kvm_guest); +} + +bool check_kvm_guest(void); +#else +static inline bool is_kvm_guest(void) { return false; } +static inline bool check_kvm_guest(void) { return false; } +#endif + +#endif /* _ASM_POWERPC_KVM_GUEST_H_ */ diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h index 744612054c94..abe1b5e82547 100644 --- a/arch/powerpc/include/asm/kvm_para.h +++ b/arch/powerpc/include/asm/kvm_para.h @@ -8,7 +8,7 @@ #ifndef __POWERPC_KVM_PARA_H__ #define __POWERPC_KVM_PARA_H__ -#include <asm/firmware.h> +#include <asm/kvm_guest.h> #include <uapi/asm/kvm_para.h> diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 475687f24f4a..cf6ebbc16cb4 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -207,7 +207,6 @@ struct machdep_calls { void (*suspend_disable_irqs)(void); void (*suspend_enable_irqs)(void); #endif - int (*suspend_disable_cpu)(void); #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE ssize_t (*cpu_probe)(const char *, size_t); diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index 
89aa8248a57d..e6c27ae843dc 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -228,6 +228,7 @@ int mce_register_notifier(struct notifier_block *nb); int mce_unregister_notifier(struct notifier_block *nb); #ifdef CONFIG_PPC_BOOK3S_64 void flush_and_reload_slb(void); +void flush_erat(void); long __machine_check_early_realmode_p7(struct pt_regs *regs); long __machine_check_early_realmode_p8(struct pt_regs *regs); long __machine_check_early_realmode_p9(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/mm-arch-hooks.h b/arch/powerpc/include/asm/mm-arch-hooks.h deleted file mode 100644 index dce274be824a..000000000000 --- a/arch/powerpc/include/asm/mm-arch-hooks.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Architecture specific mm hooks - * - * Copyright (C) 2015, IBM Corporation - * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> - */ - -#ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H -#define _ASM_POWERPC_MM_ARCH_HOOKS_H - -static inline void arch_remap(struct mm_struct *mm, - unsigned long old_start, unsigned long old_end, - unsigned long new_start, unsigned long new_end) -{ - /* - * mremap() doesn't allow moving multiple vmas so we can limit the - * check to old_start == vdso_base. - */ - if (old_start == mm->context.vdso_base) - mm->context.vdso_base = new_start; -} -#define arch_remap arch_remap - -#endif /* _ASM_POWERPC_MM_ARCH_HOOKS_H */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 255a1837e9f7..80b27f5d9648 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -29,9 +29,18 @@ */ /* - * Support for KUEP feature. + * Supports KUAP feature + * key 0 controlling userspace addresses on radix + * Key 3 on hash */ -#define MMU_FTR_KUEP ASM_CONST(0x00000400) +#define MMU_FTR_BOOK3S_KUAP ASM_CONST(0x00000200) + +/* + * Supports KUEP feature + * key 0 controlling userspace addresses on radix + * Key 3 on hash + */ +#define MMU_FTR_BOOK3S_KUEP ASM_CONST(0x00000400) /* * Support for memory protection keys. 
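The KUAP/KUEP bits renamed above are consumed through the ordinary mmu_has_feature() test; a hedged usage sketch (the wrapper function is hypothetical, not part of the patch):

/* Hypothetical caller gating a KUAP path on the renamed feature bit. */
static inline bool kuap_is_active_example(void)
{
	return mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
}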
@@ -120,14 +129,8 @@ */ #define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000) -/* - * Supports KUAP (key 0 controlling userspace addresses) on radix - */ -#define MMU_FTR_RADIX_KUAP ASM_CONST(0x80000000) - /* MMU feature bit sets for various CPUs */ -#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \ - MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2 +#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 (MMU_FTR_HPTE_TABLE | MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE) #define MMU_FTRS_POWER MMU_FTRS_DEFAULT_HPTE_ARCH_V2 #define MMU_FTRS_PPC970 MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA #define MMU_FTRS_POWER5 MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE @@ -154,7 +157,7 @@ DECLARE_PER_CPU(int, next_tlbcam_idx); enum { MMU_FTRS_POSSIBLE = -#ifdef CONFIG_PPC_BOOK3S +#if defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_BOOK3S_604) MMU_FTR_HPTE_TABLE | #endif #ifdef CONFIG_PPC_8xx @@ -163,17 +166,19 @@ enum { #ifdef CONFIG_40x MMU_FTR_TYPE_40x | #endif -#ifdef CONFIG_44x +#ifdef CONFIG_PPC_47x + MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL | +#elif defined(CONFIG_44x) MMU_FTR_TYPE_44x | #endif -#if defined(CONFIG_E200) || defined(CONFIG_E500) +#ifdef CONFIG_E500 MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX | #endif -#ifdef CONFIG_PPC_47x - MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL | -#endif #ifdef CONFIG_PPC_BOOK3S_32 - MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU | + MMU_FTR_USE_HIGH_BATS | +#endif +#ifdef CONFIG_PPC_83xx + MMU_FTR_NEED_DTLB_SW_LRU | #endif #ifdef CONFIG_PPC_BOOK3E_64 MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS | @@ -187,22 +192,47 @@ enum { #ifdef CONFIG_PPC_RADIX_MMU MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE | +#endif /* CONFIG_PPC_RADIX_MMU */ #ifdef CONFIG_PPC_KUAP - MMU_FTR_RADIX_KUAP | + MMU_FTR_BOOK3S_KUAP | #endif /* CONFIG_PPC_KUAP */ -#endif /* CONFIG_PPC_RADIX_MMU */ #ifdef CONFIG_PPC_MEM_KEYS MMU_FTR_PKEY | #endif #ifdef CONFIG_PPC_KUEP - MMU_FTR_KUEP | + MMU_FTR_BOOK3S_KUEP | #endif /* CONFIG_PPC_KUAP */ 0, }; +#if defined(CONFIG_PPC_BOOK3S_604) && !defined(CONFIG_PPC_BOOK3S_603) +#define MMU_FTRS_ALWAYS MMU_FTR_HPTE_TABLE +#endif +#ifdef CONFIG_PPC_8xx +#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_8xx +#endif +#ifdef CONFIG_40x +#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_40x +#endif +#ifdef CONFIG_PPC_47x +#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_47x +#elif defined(CONFIG_44x) +#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x +#endif +#if defined(CONFIG_E200) || defined(CONFIG_E500) +#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E +#endif + +#ifndef MMU_FTRS_ALWAYS +#define MMU_FTRS_ALWAYS 0 +#endif + static inline bool early_mmu_has_feature(unsigned long feature) { + if (MMU_FTRS_ALWAYS & feature) + return true; + return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature); } @@ -231,6 +261,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature) } #endif + if (MMU_FTRS_ALWAYS & feature) + return true; + if (!(MMU_FTRS_POSSIBLE & feature)) return false; diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index b42813359f49..d5821834dba9 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -263,8 +263,10 @@ extern void arch_exit_mmap(struct mm_struct *mm); static inline void arch_unmap(struct mm_struct *mm, unsigned long start, unsigned long end) { - if (start <= mm->context.vdso_base && mm->context.vdso_base < end) - mm->context.vdso_base = 0; + unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE; + + if (start <= vdso_base && vdso_base < end) + 
mm->context.vdso = NULL; } #ifdef CONFIG_PPC_MEM_KEYS @@ -285,7 +287,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, #define thread_pkey_regs_init(thread) #define arch_dup_pkeys(oldmm, mm) -static inline u64 pte_to_hpte_pkey_bits(u64 pteflags) +static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags) { return 0x0UL; } diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h index 567cdc557402..17a4a616436f 100644 --- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h @@ -63,8 +63,7 @@ static inline void restore_user_access(unsigned long flags) static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { - return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xff000000), - "Bug: fault blocked by AP register !"); + return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000); } #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h index 74f4edb5916e..8a8f13a22cf4 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h @@ -57,7 +57,7 @@ typedef struct { unsigned int id; unsigned int active; - unsigned long vdso_base; + void __user *vdso; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h index 28aa3b339c5e..2d92a39d8f2e 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h @@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index; typedef struct { unsigned int id; unsigned int active; - unsigned long vdso_base; + void __user *vdso; } mm_context_t; /* patch sites */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 0bd1b144eb76..478249959baa 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -181,7 +181,7 @@ void mmu_pin_tlb(unsigned long top, bool readonly); typedef struct { unsigned int id; unsigned int active; - unsigned long vdso_base; + void __user *vdso; void *pte_frag; } mm_context_t; diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h index b41004664312..e43a418d3ccd 100644 --- a/arch/powerpc/include/asm/nohash/mmu-book3e.h +++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h @@ -238,7 +238,7 @@ extern unsigned int tlbcam_index; typedef struct { unsigned int id; unsigned int active; - unsigned long vdso_base; + void __user *vdso; } mm_context_t; /* Page size definitions, common between 32 and 64-bit diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 6277e7596ae5..ac75f4ab0dba 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -192,9 +192,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, */ if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) { __asm__ __volatile__("\ - stw%U0%X0 %2,%0\n\ + stw%X0 %2,%0\n\ eieio\n\ - stw%U0%X0 %L2,%1" + stw%X1 %L2,%1" : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); return; diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h index b1d8fec29169..1edb7243e515 100644 --- a/arch/powerpc/include/asm/nohash/tlbflush.h +++ 
b/arch/powerpc/include/asm/nohash/tlbflush.h @@ -10,7 +10,6 @@ * - local_flush_tlb_mm(mm, full) flushes the specified mm context on * the local processor * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor - - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 1dffa3cb16ba..0b63ba7d5917 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -1091,9 +1091,9 @@ enum { OPAL_XIVE_IRQ_TRIGGER_PAGE = 0x00000001, OPAL_XIVE_IRQ_STORE_EOI = 0x00000002, OPAL_XIVE_IRQ_LSI = 0x00000004, - OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, - OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, - OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, + OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */ + OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, /* P9 DD1.0 workaround */ + OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, /* P9 DD1.0 workaround */ }; /* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */ diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index d64dfe3ac712..56f217606327 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -16,12 +16,6 @@ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES #endif -#ifdef CONFIG_PTE_64BIT -#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */ -#else -#define PTE_FLAGS_OFFSET 0 -#endif - #if defined(CONFIG_PPC_256K_PAGES) || \ (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)) #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */ diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index 9362c94fe3aa..edc08f04aef7 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -10,6 +10,9 @@ #endif #ifdef CONFIG_PPC_SPLPAR +#include <asm/kvm_guest.h> +#include <asm/cputhreads.h> + DECLARE_STATIC_KEY_FALSE(shared_processor); static inline bool is_shared_processor(void) @@ -74,6 +77,21 @@ static inline bool vcpu_is_preempted(int cpu) { if (!is_shared_processor()) return false; + +#ifdef CONFIG_PPC_SPLPAR + if (!is_kvm_guest()) { + int first_cpu = cpu_first_thread_sibling(smp_processor_id()); + + /* + * Preemption can only happen at core granularity. This CPU + * is not preempted if one of the CPUs of this core is not + * preempted.
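The core-granularity check leans on cpu_first_thread_sibling() from the newly included asm/cputhreads.h, which essentially masks off the thread index within a core. A sketch of that computation, assuming a power-of-two threads_per_core (see asm/cputhreads.h for the real helper):

/* Sketch of the sibling computation, not the actual implementation. */
static inline int first_thread_sibling_example(int cpu, int threads_per_core)
{
	return cpu & ~(threads_per_core - 1);	/* e.g. SMT8: cpu 11 -> 8 */
}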
+ */ + if (cpu_first_thread_sibling(cpu) == first_cpu) + return false; + } +#endif + if (yield_count_of(cpu) & 1) return true; return false; diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index f6acabb6c9be..3b7baba01c92 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -82,6 +82,7 @@ struct power_pmu { #define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ #define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */ #define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */ +#define PPMU_P10_DD1 0x00000400 /* Is power10 DD1 processor version */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h index d37ededca3ee..9acd1fbf1197 100644 --- a/arch/powerpc/include/asm/pnv-ocxl.h +++ b/arch/powerpc/include/asm/pnv-ocxl.h @@ -3,12 +3,59 @@ #ifndef _ASM_PNV_OCXL_H #define _ASM_PNV_OCXL_H +#include <linux/bitfield.h> #include <linux/pci.h> #define PNV_OCXL_TL_MAX_TEMPLATE 63 #define PNV_OCXL_TL_BITS_PER_RATE 4 #define PNV_OCXL_TL_RATE_BUF_SIZE ((PNV_OCXL_TL_MAX_TEMPLATE+1) * PNV_OCXL_TL_BITS_PER_RATE / 8) +#define PNV_OCXL_ATSD_TIMEOUT 1 + +/* TLB Management Instructions */ +#define PNV_OCXL_ATSD_LNCH 0x00 +/* Radix Invalidate */ +#define PNV_OCXL_ATSD_LNCH_R PPC_BIT(0) +/* Radix Invalidation Control + * 0b00 Just invalidate TLB. + * 0b01 Invalidate just Page Walk Cache. + * 0b10 Invalidate TLB, Page Walk Cache, and any + * caching of Partition and Process Table Entries. + */ +#define PNV_OCXL_ATSD_LNCH_RIC PPC_BITMASK(1, 2) +/* Number and Page Size of translations to be invalidated */ +#define PNV_OCXL_ATSD_LNCH_LP PPC_BITMASK(3, 10) +/* Invalidation Criteria + * 0b00 Invalidate just the target VA. + * 0b01 Invalidate matching PID. 
+ */ +#define PNV_OCXL_ATSD_LNCH_IS PPC_BITMASK(11, 12) +/* 0b1: Process Scope, 0b0: Partition Scope */ +#define PNV_OCXL_ATSD_LNCH_PRS PPC_BIT(13) +/* Invalidation Flag */ +#define PNV_OCXL_ATSD_LNCH_B PPC_BIT(14) +/* Actual Page Size to be invalidated + * 000 4KB + * 101 64KB + * 001 2MB + * 010 1GB + */ +#define PNV_OCXL_ATSD_LNCH_AP PPC_BITMASK(15, 17) +/* Defines the large page select + * L=0b0 for 4KB pages + * L=0b1 for large pages) + */ +#define PNV_OCXL_ATSD_LNCH_L PPC_BIT(18) +/* Process ID */ +#define PNV_OCXL_ATSD_LNCH_PID PPC_BITMASK(19, 38) +/* NoFlush – Assumed to be 0b0 */ +#define PNV_OCXL_ATSD_LNCH_F PPC_BIT(39) +#define PNV_OCXL_ATSD_LNCH_OCAPI_SLBI PPC_BIT(40) +#define PNV_OCXL_ATSD_LNCH_OCAPI_SINGLETON PPC_BIT(41) +#define PNV_OCXL_ATSD_AVA 0x08 +#define PNV_OCXL_ATSD_AVA_AVA PPC_BITMASK(0, 51) +#define PNV_OCXL_ATSD_STAT 0x10 + int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, u16 *supported); int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count); @@ -28,4 +75,11 @@ int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **p void pnv_ocxl_spa_release(void *platform_data); int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle); +int pnv_ocxl_map_lpar(struct pci_dev *dev, uint64_t lparid, + uint64_t lpcr, void __iomem **arva); +void pnv_ocxl_unmap_lpar(void __iomem *arva); +void pnv_ocxl_tlb_invalidate(void __iomem *arva, + unsigned long pid, + unsigned long addr, + unsigned long page_size); #endif /* _ASM_PNV_OCXL_H */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index a6e3700c4566..ed161ef2b3ca 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -78,6 +78,9 @@ #define IMM_L(i) ((uintptr_t)(i) & 0xffff) #define IMM_DS(i) ((uintptr_t)(i) & 0xfffc) +#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0) +#define IMM_D0(i) (((uintptr_t)(i) >> 16) & 0x3ffff) +#define IMM_D1(i) IMM_L(i) /* * 16-bit immediate helper macros: HA() is for use with sign-extending instrs @@ -230,7 +233,6 @@ #define PPC_INST_POPCNTB_MASK 0xfc0007fe #define PPC_INST_RFEBB 0x4c000124 #define PPC_INST_RFID 0x4c000024 -#define PPC_INST_MFSPR 0x7c0002a6 #define PPC_INST_MFSPR_DSCR 0x7c1102a6 #define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe #define PPC_INST_MTSPR_DSCR 0x7c1103a6 @@ -295,6 +297,8 @@ #define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4)) #define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) #define __PPC_XT(s) __PPC_XS(s) +#define __PPC_XSP(s) ((((s) & 0x1e) | (((s) >> 5) & 0x1)) << 21) +#define __PPC_XTP(s) __PPC_XSP(s) #define __PPC_T_TLB(t) (((t) & 0x3) << 21) #define __PPC_WC(w) (((w) & 0x3) << 21) #define __PPC_WS(w) (((w) & 0x1f) << 11) @@ -395,6 +399,14 @@ #define PPC_RAW_XVCPSGNDP(t, a, b) ((0xf0000780 | VSX_XX3((t), (a), (b)))) #define PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc) \ ((0x1000002d | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6))) +#define PPC_RAW_LXVP(xtp, a, i) (0x18000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_DQ(i)) +#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i)) +#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_PLXVP(xtp, i, a, pr) \ + ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))) +#define PPC_RAW_PSTXVP(xsp, i, a, pr) \ 
+ ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))) #define PPC_RAW_NAP (0x4c000364) #define PPC_RAW_SLEEP (0x4c0003a4) #define PPC_RAW_WINKLE (0x4c0003e4) @@ -507,6 +519,8 @@ #define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a)) +#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr)) + /* Deal with instructions that older assemblers aren't aware of */ #define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH) #define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 511786f0e40d..cfa814824285 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -251,6 +251,8 @@ n: #define _GLOBAL_TOC(name) _GLOBAL(name) +#define DOTSYM(a) a + #endif /* @@ -495,15 +497,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) #endif #ifdef CONFIG_PPC_BOOK3S_64 -#define RFI rfid #define MTMSRD(r) mtmsrd r #define MTMSR_EERI(reg) mtmsrd reg,1 #else -#ifndef CONFIG_40x -#define RFI rfi -#else -#define RFI rfi; b . /* Prevent prefetch past rfi */ -#endif #define MTMSRD(r) mtmsr r #define MTMSR_EERI(reg) mtmsr reg #endif diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index c61c859b51a8..8acc3590c971 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -6,6 +6,8 @@ * Copyright (C) 2001 PPC 64 Team, IBM Corp */ +#include <vdso/processor.h> + #include <asm/reg.h> #ifdef CONFIG_VSX @@ -63,14 +65,6 @@ extern int _chrp_type; #endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */ -/* Macros for adjusting thread priority (hardware multi-threading) */ -#define HMT_very_low() asm volatile("or 31,31,31 # very low priority") -#define HMT_low() asm volatile("or 1,1,1 # low priority") -#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority") -#define HMT_medium() asm volatile("or 2,2,2 # medium priority") -#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority") -#define HMT_high() asm volatile("or 3,3,3 # high priority") - #ifdef __KERNEL__ #ifdef CONFIG_PPC64 @@ -170,8 +164,10 @@ struct thread_struct { #endif /* Debug Registers */ struct debug_reg debug; +#ifdef CONFIG_PPC_FPU_REGS struct thread_fp_state fp_state; struct thread_fp_state *fp_save_area; +#endif int fpexc_mode; /* floating-point exception mode */ unsigned int align_ctl; /* alignment handling control */ #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -230,10 +226,6 @@ struct thread_struct { struct thread_vr_state ckvr_state; /* Checkpointed VR state */ unsigned long ckvrsave; /* Checkpointed VRSAVE */ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ -#ifdef CONFIG_PPC_MEM_KEYS - unsigned long amr; - unsigned long iamr; -#endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER void* kvm_shadow_vcpu; /* KVM internal data */ #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ @@ -344,7 +336,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode) } #ifdef CONFIG_PPC64 -#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0) #define spin_begin() HMT_low() @@ -363,8 +354,6 @@ do { \ } \ } while (0) -#else -#define cpu_relax() barrier() #endif /* Check that a certain kernel stack pointer is valid in task_struct p */ @@ -398,20 +387,6 @@ static inline void prefetchw(const void *x) #define HAVE_ARCH_PICK_MMAP_LAYOUT -#ifdef CONFIG_PPC64 -static inline unsigned long get_clean_sp(unsigned long sp, int is_32) -{ - if 
(is_32) - return sp & 0x0ffffffffUL; - return sp; -} -#else -static inline unsigned long get_clean_sp(unsigned long sp, int is_32) -{ - return sp; -} -#endif - /* asm stubs */ extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val); extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val); diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index cb89e4bf55ce..e646c7f218bc 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -378,8 +378,8 @@ struct ps3_system_bus_driver { enum ps3_match_sub_id match_sub_id; struct device_driver core; int (*probe)(struct ps3_system_bus_device *); - int (*remove)(struct ps3_system_bus_device *); - int (*shutdown)(struct ps3_system_bus_device *); + void (*remove)(struct ps3_system_bus_device *); + void (*shutdown)(struct ps3_system_bus_device *); /* int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */ /* int (*resume)(struct ps3_system_bus_device *); */ }; diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index e2c778c176a3..58f9dc060a7b 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -53,11 +53,19 @@ struct pt_regs #ifdef CONFIG_PPC64 unsigned long ppr; #endif + union { #ifdef CONFIG_PPC_KUAP - unsigned long kuap; + unsigned long kuap; +#endif +#ifdef CONFIG_PPC_PKEY + unsigned long amr; +#endif + }; +#ifdef CONFIG_PPC_PKEY + unsigned long iamr; #endif }; - unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */ + unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */ }; }; #endif @@ -171,12 +179,6 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) set_thread_flag(TIF_NOERROR); \ } while(0) -struct task_struct; -extern int ptrace_get_reg(struct task_struct *task, int regno, - unsigned long *data); -extern int ptrace_put_reg(struct task_struct *task, int regno, - unsigned long data); - #define current_pt_regs() \ ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index f877a576b338..e40a921d78f9 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -29,7 +29,6 @@ #include <asm/reg_8xx.h> #define MSR_SF_LG 63 /* Enable 64 bit mode */ -#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */ #define MSR_HV_LG 60 /* Hypervisor state */ #define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */ #define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */ @@ -69,13 +68,11 @@ #ifdef CONFIG_PPC64 #define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */ -#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */ #define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */ #define MSR_S __MASK(MSR_S_LG) /* Secure state */ #else /* so tests for these bits fail on 32-bit */ #define MSR_SF 0 -#define MSR_ISF 0 #define MSR_HV 0 #define MSR_S 0 #endif @@ -134,7 +131,7 @@ #define MSR_64BIT MSR_SF /* Server variant */ -#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV) #ifdef __BIG_ENDIAN__ #define MSR_ __MSR #define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV) @@ -864,6 +861,7 @@ #define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */ #define MMCR0_EBE 0x00100000UL /* Event based branch enable */ #define MMCR0_PMCC 0x000c0000UL /* PMC control */ +#define MMCR0_PMCCEXT ASM_CONST(0x00000200) /* PMCCEXT control */ 
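The __pad growth in the pt_regs hunk above exists only to keep the documented 16-byte interrupt stack alignment as the kuap/amr union and the iamr slot are added; a hypothetical build-time guard for that invariant (not part of the patch) could read:

/* Hypothetical guard for the alignment invariant stated in the comment. */
static_assert(sizeof(struct pt_regs) % 16 == 0,
	      "pt_regs must preserve 16-byte interrupt stack alignment");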
#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */ #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/ #define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/ @@ -1203,7 +1201,7 @@ #ifdef CONFIG_PPC_BOOK3S_32 #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 -#define SPRN_SPRG_PGDIR SPRN_SPRG2 +#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 #define SPRN_SPRG_603_LRU SPRN_SPRG4 #endif @@ -1232,14 +1230,9 @@ #define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1 #define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R #define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W -#ifdef CONFIG_E200 -#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG6R -#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG6W -#else #define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9 #define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9 #endif -#endif #ifdef CONFIG_PPC_8xx #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 @@ -1419,37 +1412,6 @@ static inline void msr_check_and_clear(unsigned long bits) __msr_check_and_clear(bits); } -#if defined(CONFIG_PPC_CELL) || defined(CONFIG_E500) -#define mftb() ({unsigned long rval; \ - asm volatile( \ - "90: mfspr %0, %2;\n" \ - ASM_FTR_IFSET( \ - "97: cmpwi %0,0;\n" \ - " beq- 90b;\n", "", %1) \ - : "=r" (rval) \ - : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ - rval;}) -#elif defined(CONFIG_PPC_8xx) -#define mftb() ({unsigned long rval; \ - asm volatile("mftbl %0" : "=r" (rval)); rval;}) -#else -#define mftb() ({unsigned long rval; \ - asm volatile("mfspr %0, %1" : \ - "=r" (rval) : "i" (SPRN_TBRL)); rval;}) -#endif /* !CONFIG_PPC_CELL */ - -#if defined(CONFIG_PPC_8xx) -#define mftbu() ({unsigned long rval; \ - asm volatile("mftbu %0" : "=r" (rval)); rval;}) -#else -#define mftbu() ({unsigned long rval; \ - asm volatile("mfspr %0, %1" : "=r" (rval) : \ - "i" (SPRN_TBRU)); rval;}) -#endif - -#define mttbl(v) asm volatile("mttbl %0":: "r"(v)) -#define mttbu(v) asm volatile("mttbu %0":: "r"(v)) - #ifdef CONFIG_PPC32 #define mfsrin(v) ({unsigned int rval; \ asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 29a948e0c0f2..262782f08fd4 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -281,18 +281,6 @@ #define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */ #endif -#ifdef CONFIG_E200 -#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ -#define MCSR_CP_PERR 0x20000000UL /* Cache Push Parity Error */ -#define MCSR_CPERR 0x10000000UL /* Cache Parity Error */ -#define MCSR_EXCP_ERR 0x08000000UL /* ISI, ITLB, or Bus Error on 1st insn - fetch for an exception handler */ -#define MCSR_BUS_IRERR 0x00000010UL /* Read Bus Error on instruction fetch*/ -#define MCSR_BUS_DRERR 0x00000008UL /* Read Bus Error on data load */ -#define MCSR_BUS_WRERR 0x00000004UL /* Write Bus Error on buffered - store or cache line push */ -#endif - /* Bit definitions for the HID1 */ #ifdef CONFIG_E500 /* e500v1/v2 */ diff --git a/arch/powerpc/include/asm/rtas-types.h b/arch/powerpc/include/asm/rtas-types.h index aa420561bc10..8df6235d64d1 100644 --- a/arch/powerpc/include/asm/rtas-types.h +++ b/arch/powerpc/include/asm/rtas-types.h @@ -23,14 +23,6 @@ struct rtas_t { struct device_node *dev; /* virtual address pointer */ }; -struct rtas_suspend_me_data { - atomic_t working; /* number of cpus accessing this struct */ - atomic_t done; - int token; /* ibm,suspend-me */ - atomic_t error; - struct completion *complete; /* wait on this until working == 0 */ -}; - struct rtas_error_log { /* Byte 0 */ u8 byte0; /* 
Architectural version */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 55f9a154c95d..332e1000ca0f 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -23,11 +23,16 @@ #define RTAS_RMOBUF_MAX (64 * 1024) /* RTAS return status codes */ -#define RTAS_NOT_SUSPENDABLE -9004 #define RTAS_BUSY -2 /* RTAS Busy */ #define RTAS_EXTENDED_DELAY_MIN 9900 #define RTAS_EXTENDED_DELAY_MAX 9905 +/* statuses specific to ibm,suspend-me */ +#define RTAS_SUSPEND_ABORTED 9000 /* Suspension aborted */ +#define RTAS_NOT_SUSPENDABLE -9004 /* Partition not suspendable */ +#define RTAS_THREADS_ACTIVE -9005 /* Multiple processor threads active */ +#define RTAS_OUTSTANDING_COPROC -9006 /* Outstanding coprocessor operations */ + /* * In general to call RTAS use rtas_token("string") to lookup * an RTAS token for the given string (e.g. "event-scan"). @@ -242,6 +247,7 @@ extern void __noreturn rtas_restart(char *cmd); extern void rtas_power_off(void); extern void __noreturn rtas_halt(void); extern void rtas_os_term(char *str); +void rtas_activate_firmware(void); extern int rtas_get_sensor(int sensor, int index, int *state); extern int rtas_get_sensor_fast(int sensor, int index, int *state); extern int rtas_get_power_level(int powerdomain, int *level); @@ -250,9 +256,7 @@ extern bool rtas_indicator_present(int token, int *maxindex); extern int rtas_set_indicator(int indicator, int index, int new_value); extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); -extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); -extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); -extern int rtas_ibm_suspend_me(u64 handle); +int rtas_ibm_suspend_me(int *fw_status); struct rtc_time; extern time64_t rtas_get_boot_time(void); @@ -272,8 +276,13 @@ extern time64_t last_rtas_event; extern int clobbering_unread_rtas_event(void); extern int pseries_devicetree_update(s32 scope); extern void post_mobility_fixup(void); +int rtas_syscall_dispatch_ibm_suspend_me(u64 handle); #else static inline int clobbering_unread_rtas_event(void) { return 0; } +static inline int rtas_syscall_dispatch_ibm_suspend_me(u64 handle) +{ + return -EINVAL; +} #endif #ifdef CONFIG_PPC_RTAS_DAEMON diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index b2035b2f57ce..c4e2d53acd2b 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -134,6 +134,7 @@ static inline struct cpumask *cpu_smallcore_mask(int cpu) extern int cpu_to_core_id(int cpu); extern bool has_big_cores; +extern bool thread_group_shares_l2; #define cpu_smt_mask cpu_smt_mask #ifdef CONFIG_SCHED_SMT @@ -187,6 +188,7 @@ extern void __cpu_die(unsigned int cpu); /* for UP */ #define hard_smp_processor_id() get_hard_smp_processor_id(0) #define smp_setup_cpu_maps() +#define thread_group_shares_l2 0 static inline void inhibit_secondary_onlining(void) {} static inline void uninhibit_secondary_onlining(void) {} static inline const struct cpumask *cpu_sibling_mask(int cpu) @@ -199,6 +201,10 @@ static inline const struct cpumask *cpu_smallcore_mask(int cpu) return cpumask_of(cpu); } +static inline const struct cpumask *cpu_l2_cache_mask(int cpu) +{ + return cpumask_of(cpu); +} #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 53115ae61495..3d8a47af7a25 100644 --- a/arch/powerpc/include/asm/thread_info.h 
+++ b/arch/powerpc/include/asm/thread_info.h @@ -77,10 +77,8 @@ struct thread_info { /* how to get the thread information struct from C */ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); -#ifdef CONFIG_PPC_BOOK3S_64 void arch_setup_new_exec(void); #define arch_setup_new_exec arch_setup_new_exec -#endif #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 2f566c1a754c..8f789b597bae 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -15,6 +15,7 @@ #include <asm/processor.h> #include <asm/cpu_has_feature.h> +#include <asm/vdso/timebase.h> /* time.c */ extern unsigned long tb_ticks_per_jiffy; @@ -38,42 +39,12 @@ struct div_result { u64 result_low; }; -/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */ -static inline unsigned long get_tbl(void) -{ - return mftb(); -} - static inline u64 get_vtb(void) { -#ifdef CONFIG_PPC_BOOK3S_64 if (cpu_has_feature(CPU_FTR_ARCH_207S)) return mfspr(SPRN_VTB); -#endif - return 0; -} - -static inline u64 get_tb(void) -{ - unsigned int tbhi, tblo, tbhi2; - - if (IS_ENABLED(CONFIG_PPC64)) - return mftb(); - do { - tbhi = mftbu(); - tblo = mftb(); - tbhi2 = mftbu(); - } while (tbhi != tbhi2); - - return ((u64)tbhi << 32) | tblo; -} - -static inline void set_tb(unsigned int upper, unsigned int lower) -{ - mtspr(SPRN_TBWL, 0); - mtspr(SPRN_TBWU, upper); - mtspr(SPRN_TBWL, lower); + return 0; } /* Accessor functions for the decrementer register. diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 95988870a57b..fa2e76e4093a 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -9,7 +9,7 @@ */ #include <asm/cputable.h> -#include <asm/reg.h> +#include <asm/vdso/timebase.h> #define CLOCK_TICK_RATE 1024000 /* Underlying HZ */ diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index d97f061fecac..160422a439aa 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -40,9 +40,6 @@ extern void tlb_flush(struct mmu_gather *tlb); /* Get the generic bits... 
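The new UP stub for cpu_l2_cache_mask() above mirrors the other mask helpers by collapsing to the CPU itself, so topology consumers can iterate the same way in SMP and UP builds; a hedged usage sketch (the printing function is hypothetical):

/* Illustrative consumer that is correct for both SMP and UP builds. */
static void print_l2_siblings_example(int cpu)
{
	int i;

	for_each_cpu(i, cpu_l2_cache_mask(cpu))
		pr_info("cpu %d shares L2 with cpu %d\n", cpu, i);
}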
*/ #include <asm-generic/tlb.h> -extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, - unsigned long address); - static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) { diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 2ff884853f97..8542e9bbeead 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -1,12 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __PPC64_VDSO_H__ -#define __PPC64_VDSO_H__ - -#ifdef __KERNEL__ - -/* Default link addresses for the vDSOs */ -#define VDSO32_LBASE 0x0 -#define VDSO64_LBASE 0x0 +#ifndef _ASM_POWERPC_VDSO_H +#define _ASM_POWERPC_VDSO_H /* Default map addresses for 32bit vDSO */ #define VDSO32_MBASE 0x100000 @@ -15,10 +9,17 @@ #ifndef __ASSEMBLY__ -/* Offsets relative to thread->vdso_base */ -extern unsigned long vdso64_rt_sigtramp; -extern unsigned long vdso32_sigtramp; -extern unsigned long vdso32_rt_sigtramp; +#ifdef CONFIG_PPC64 +#include <generated/vdso64-offsets.h> +#endif + +#ifdef CONFIG_VDSO32 +#include <generated/vdso32-offsets.h> +#endif + +#define VDSO64_SYMBOL(base, name) ((unsigned long)(base) + (vdso64_offset_##name)) + +#define VDSO32_SYMBOL(base, name) ((unsigned long)(base) + (vdso32_offset_##name)) int vdso_getcpu_init(void); @@ -51,6 +52,4 @@ int vdso_getcpu_init(void); #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ - -#endif /* __PPC64_VDSO_H__ */ +#endif /* _ASM_POWERPC_VDSO_H */ diff --git a/arch/powerpc/include/asm/vdso/clocksource.h b/arch/powerpc/include/asm/vdso/clocksource.h new file mode 100644 index 000000000000..c1ba56b82ee5 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/clocksource.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_VDSO_CLOCKSOURCE_H +#define _ASM_POWERPC_VDSO_CLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES VDSO_CLOCKMODE_ARCHTIMER + +#endif diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h new file mode 100644 index 000000000000..81671aa365b3 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/gettimeofday.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H +#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H + +#ifdef __ASSEMBLY__ + +#include <asm/ppc_asm.h> + +/* + * The macros set up two stack frames, one for the caller and one for the callee, + * because there is no requirement for the caller to set a stack frame when + * calling the VDSO, so it may have omitted to set one, especially on PPC64 + */ + +.macro cvdso_call funct + .cfi_startproc + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + mflr r0 + .cfi_register lr, r0 + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) +#ifdef __powerpc64__ + PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1) +#endif + get_datapage r5 + addi r5, r5, VDSO_DATA_OFFSET + bl DOTSYM(\funct) + PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) +#ifdef __powerpc64__ + PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) +#endif + cmpwi r3, 0 + mtlr r0 + .cfi_restore lr + addi r1, r1, 2 * PPC_MIN_STKFRM + crclr so + beqlr+ + crset so + neg r3, r3 + blr + .cfi_endproc +.endm + +.macro cvdso_call_time funct + .cfi_startproc + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + mflr r0 + .cfi_register lr, r0 + PPC_STLU r1, -PPC_MIN_STKFRM(r1) + PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) +#ifdef __powerpc64__ + PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1) +#endif + get_datapage r4 + addi r4, r4, VDSO_DATA_OFFSET + bl DOTSYM(\funct) + PPC_LL r0, 
PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) +#ifdef __powerpc64__ + PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) +#endif + crclr so + mtlr r0 + .cfi_restore lr + addi r1, r1, 2 * PPC_MIN_STKFRM + blr + .cfi_endproc +.endm + +#else + +#include <asm/vdso/timebase.h> +#include <asm/barrier.h> +#include <asm/unistd.h> +#include <uapi/linux/time.h> + +#define VDSO_HAS_CLOCK_GETRES 1 + +#define VDSO_HAS_TIME 1 + +static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3, + const unsigned long _r4) +{ + register long r0 asm("r0") = _r0; + register unsigned long r3 asm("r3") = _r3; + register unsigned long r4 asm("r4") = _r4; + register int ret asm ("r3"); + + asm volatile( + " sc\n" + " bns+ 1f\n" + " neg %0, %0\n" + "1:\n" + : "=r" (ret), "+r" (r4), "+r" (r0) + : "r" (r3) + : "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr"); + + return ret; +} + +static __always_inline +int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz) +{ + return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz); +} + +static __always_inline +int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts); +} + +static __always_inline +int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts); +} + +#ifdef CONFIG_VDSO32 + +#define BUILD_VDSO32 1 + +static __always_inline +int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts); +} + +static __always_inline +int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts); +} +#endif + +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) +{ + return get_tb(); +} + +const struct vdso_data *__arch_get_vdso_data(void); + +static inline bool vdso_clocksource_ok(const struct vdso_data *vd) +{ + return true; +} +#define vdso_clocksource_ok vdso_clocksource_ok + +/* + * powerpc specific delta calculation. + * + * This variant removes the masking of the subtraction because the + * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX + * which would result in a pointless operation. The compiler cannot + * optimize it away as the mask comes from the vdso data and is not compile + * time constant. 
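For contrast with the comment above, the generic lib/vdso fallback keeps the mask; a sketch of roughly what the generic form computes (not powerpc code, shown only to make the specialisation visible):

/* Sketch of the generic form that the powerpc variant specialises. */
static __always_inline u64 generic_calc_delta_example(u64 cycles, u64 last,
						      u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;	/* mask is U64_MAX on powerpc */
}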
+ */ +static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +{ + return (cycles - last) * mult; +} +#define vdso_calc_delta vdso_calc_delta + +#ifndef __powerpc64__ +static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift) +{ + u32 hi = ns >> 32; + u32 lo = ns; + + lo >>= shift; + lo |= hi << (32 - shift); + hi >>= shift; + + if (likely(hi == 0)) + return lo; + + return ((u64)hi << 32) | lo; +} +#define vdso_shift_ns vdso_shift_ns +#endif + +#ifdef __powerpc64__ +int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts, + const struct vdso_data *vd); +int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res, + const struct vdso_data *vd); +#else +int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts, + const struct vdso_data *vd); +int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts, + const struct vdso_data *vd); +int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res, + const struct vdso_data *vd); +#endif +int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz, + const struct vdso_data *vd); +__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, + const struct vdso_data *vd); +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/powerpc/include/asm/vdso/processor.h b/arch/powerpc/include/asm/vdso/processor.h new file mode 100644 index 000000000000..e072577bc7c0 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/processor.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_POWERPC_VDSO_PROCESSOR_H +#define _ASM_POWERPC_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +/* Macros for adjusting thread priority (hardware multi-threading) */ +#define HMT_very_low() asm volatile("or 31, 31, 31 # very low priority") +#define HMT_low() asm volatile("or 1, 1, 1 # low priority") +#define HMT_medium_low() asm volatile("or 6, 6, 6 # medium low priority") +#define HMT_medium() asm volatile("or 2, 2, 2 # medium priority") +#define HMT_medium_high() asm volatile("or 5, 5, 5 # medium high priority") +#define HMT_high() asm volatile("or 3, 3, 3 # high priority") + +#ifdef CONFIG_PPC64 +#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0) +#else +#define cpu_relax() barrier() +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_VDSO_PROCESSOR_H */ diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h new file mode 100644 index 000000000000..b558b07959ce --- /dev/null +++ b/arch/powerpc/include/asm/vdso/timebase.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Common timebase prototypes and such for all ppc machines. + */ + +#ifndef _ASM_POWERPC_VDSO_TIMEBASE_H +#define _ASM_POWERPC_VDSO_TIMEBASE_H + +#include <asm/reg.h> + +/* + * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit + * version below in the else case of the ifdef. 
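The open-coded double-word shift in vdso_shift_ns() above (32-bit builds only) can be sanity-checked against a plain 64-bit shift; a hypothetical self-test, assuming the clock shift stays in the (0, 32) range used in practice:

/* Hypothetical check: the two-halves shift equals a straight 64-bit shift. */
static void vdso_shift_selftest_example(void)
{
	u64 ns = 0x0123456789abcdefULL;

	BUG_ON(vdso_shift_ns(ns, 7) != (ns >> 7));
}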
+ */ +#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_E500)) +#define mftb() ({unsigned long rval; \ + asm volatile( \ + "90: mfspr %0, %2;\n" \ + ASM_FTR_IFSET( \ + "97: cmpwi %0,0;\n" \ + " beq- 90b;\n", "", %1) \ + : "=r" (rval) \ + : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ + rval;}) +#elif defined(CONFIG_PPC_8xx) +#define mftb() ({unsigned long rval; \ + asm volatile("mftbl %0" : "=r" (rval)); rval;}) +#else +#define mftb() ({unsigned long rval; \ + asm volatile("mfspr %0, %1" : \ + "=r" (rval) : "i" (SPRN_TBRL)); rval;}) +#endif /* !CONFIG_PPC_CELL */ + +#if defined(CONFIG_PPC_8xx) +#define mftbu() ({unsigned long rval; \ + asm volatile("mftbu %0" : "=r" (rval)); rval;}) +#else +#define mftbu() ({unsigned long rval; \ + asm volatile("mfspr %0, %1" : "=r" (rval) : \ + "i" (SPRN_TBRU)); rval;}) +#endif + +#define mttbl(v) asm volatile("mttbl %0":: "r"(v)) +#define mttbu(v) asm volatile("mttbu %0":: "r"(v)) + +/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */ +static inline unsigned long get_tbl(void) +{ + return mftb(); +} + +static inline u64 get_tb(void) +{ + unsigned int tbhi, tblo, tbhi2; + + /* + * We use __powerpc64__ here not CONFIG_PPC64 because we want the compat + * VDSO to use the 32-bit compatible version in the while loop below. + */ + if (__is_defined(__powerpc64__)) + return mftb(); + + do { + tbhi = mftbu(); + tblo = mftb(); + tbhi2 = mftbu(); + } while (tbhi != tbhi2); + + return ((u64)tbhi << 32) | tblo; +} + +static inline void set_tb(unsigned int upper, unsigned int lower) +{ + mtspr(SPRN_TBWL, 0); + mtspr(SPRN_TBWU, upper); + mtspr(SPRN_TBWL, lower); +} + +#endif /* _ASM_POWERPC_VDSO_TIMEBASE_H */ diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h new file mode 100644 index 000000000000..48cf23f1e273 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/vsyscall.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_VDSO_VSYSCALL_H +#define _ASM_POWERPC_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include <linux/timekeeper_internal.h> +#include <asm/vdso_datapage.h> + +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. 
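Roughly how the generic timekeeping code uses the hook defined just below: update_vsyscall() asks the architecture for the vdso_data array and refreshes it on every timekeeping update. A simplified sketch (the function name is illustrative; see kernel/time/vsyscall.c for the real caller):

/* Simplified sketch of the generic consumer of __arch_get_k_vdso_data(). */
static void update_vsyscall_example(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	vdata[CS_HRES_COARSE].clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
	/* ... generic code also copies cycle_last, mult, shift, ... */
}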
+ */ +static __always_inline +struct vdso_data *__arch_get_k_vdso_data(void) +{ + return vdso_data->data; +} +#define __arch_get_k_vdso_data __arch_get_k_vdso_data + +/* The asm-generic header needs to be included after the definitions above */ +#include <asm-generic/vdso/vsyscall.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_VDSO_VSYSCALL_H */ diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index b9ef6cf50ea5..3f958ecf2beb 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -36,6 +36,7 @@ #include <linux/unistd.h> #include <linux/time.h> +#include <vdso/datapage.h> #define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32) @@ -45,7 +46,7 @@ #ifdef CONFIG_PPC64 -struct vdso_data { +struct vdso_arch_data { __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */ struct { /* Systemcfg version numbers */ __u32 major; /* Major number 0x10 */ @@ -59,13 +60,13 @@ struct vdso_data { __u32 processor; /* Processor type 0x1C */ __u64 processorCount; /* # of physical processors 0x20 */ __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */ - __u64 tb_orig_stamp; /* Timebase at boot 0x30 */ + __u64 tb_orig_stamp; /* (NU) Timebase at boot 0x30 */ __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ - __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */ - __u64 stamp_xsec; /* 0x48 */ - __u64 tb_update_count; /* Timebase atomicity ctr 0x50 */ - __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */ - __u32 tz_dsttime; /* Type of dst correction 0x5C */ + __u64 tb_to_xs; /* (NU) Inverse of TB to 2^20 0x40 */ + __u64 stamp_xsec; /* (NU) 0x48 */ + __u64 tb_update_count; /* (NU) Timebase atomicity ctr 0x50 */ + __u32 tz_minuteswest; /* (NU) Min. west of Greenwich 0x58 */ + __u32 tz_dsttime; /* (NU) Type of dst correction 0x5C */ __u32 dcache_size; /* L1 d-cache size 0x60 */ __u32 dcache_line_size; /* L1 d-cache line size 0x64 */ __u32 icache_size; /* L1 i-cache size 0x68 */ @@ -78,14 +79,10 @@ struct vdso_data { __u32 icache_block_size; /* L1 i-cache block size */ __u32 dcache_log_block_size; /* L1 d-cache log block size */ __u32 icache_log_block_size; /* L1 i-cache log block size */ - __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ - __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */ - __s64 wtom_clock_sec; /* Wall to monotonic clock sec */ - __s64 stamp_xtime_sec; /* xtime secs as at tb_orig_stamp */ - __s64 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */ - __u32 hrtimer_res; /* hrtimer resolution */ - __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ - __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ + __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ + __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */ + + struct vdso_data data[CS_BASES]; }; #else /* CONFIG_PPC64 */ @@ -93,35 +90,27 @@ struct vdso_data { /* * And here is the simpler 32 bits version */ -struct vdso_data { - __u64 tb_orig_stamp; /* Timebase at boot 0x30 */ +struct vdso_arch_data { __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ - __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */ - __u64 stamp_xsec; /* 0x48 */ - __u32 tb_update_count; /* Timebase atomicity ctr 0x50 */ - __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */ - __u32 tz_dsttime; /* Type of dst correction 0x5C */ - __s32 wtom_clock_sec; /* Wall to monotonic clock */ - __s32 wtom_clock_nsec; - __s32 stamp_xtime_sec; /* xtime seconds as at tb_orig_stamp */ - __s32 
stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */ - __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ - __u32 hrtimer_res; /* hrtimer resolution */ - __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ + __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ + __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */ + struct vdso_data data[CS_BASES]; }; #endif /* CONFIG_PPC64 */ -extern struct vdso_data *vdso_data; +extern struct vdso_arch_data *vdso_data; #else /* __ASSEMBLY__ */ -.macro get_datapage ptr, tmp +.macro get_datapage ptr bcl 20, 31, .+4 +999: mflr \ptr - addi \ptr, \ptr, (__kernel_datapage_offset - (.-4))@l - lwz \tmp, 0(\ptr) - add \ptr, \tmp, \ptr +#if CONFIG_PPC_PAGE_SHIFT > 14 + addis \ptr, \ptr, (_vdso_datapage - 999b)@ha +#endif + addi \ptr, \ptr, (_vdso_datapage - 999b)@l .endm #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 309b4d65b74f..9a312b975ca8 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -60,13 +60,13 @@ struct xive_irq_data { }; #define XIVE_IRQ_FLAG_STORE_EOI 0x01 #define XIVE_IRQ_FLAG_LSI 0x02 -#define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 -#define XIVE_IRQ_FLAG_MASK_FW 0x08 -#define XIVE_IRQ_FLAG_EOI_FW 0x10 +/* #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 */ /* P9 DD1.0 workaround */ +/* #define XIVE_IRQ_FLAG_MASK_FW 0x08 */ /* P9 DD1.0 workaround */ +/* #define XIVE_IRQ_FLAG_EOI_FW 0x10 */ /* P9 DD1.0 workaround */ #define XIVE_IRQ_FLAG_H_INT_ESB 0x20 /* Special flag set by KVM for escalation interrupts */ -#define XIVE_IRQ_NO_EOI 0x80 +#define XIVE_IRQ_FLAG_NO_EOI 0x80 #define XIVE_INVALID_CHIP_ID -1
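With the rename above, consumers now test the flag under its namespaced name; a hedged example of the consumer side (the helper function is hypothetical):

/* Hypothetical consumer-side check after the XIVE_IRQ_FLAG_NO_EOI rename. */
static bool xive_skip_eoi_example(const struct xive_irq_data *xd)
{
	return xd->flags & XIVE_IRQ_FLAG_NO_EOI;	/* KVM escalation IRQs */
}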