path: root/arch/powerpc/include
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/asm-prototypes.h      |   9
-rw-r--r--  arch/powerpc/include/asm/barrier.h              |   2
-rw-r--r--  arch/powerpc/include/asm/book3s/32/hash.h       |  45
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h        | 195
-rw-r--r--  arch/powerpc/include/asm/book3s/32/mmu-hash.h   |  41
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h    |  38
-rw-r--r--  arch/powerpc/include/asm/book3s/64/kup.h        |   3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h    |   3
-rw-r--r--  arch/powerpc/include/asm/checksum.h             |   2
-rw-r--r--  arch/powerpc/include/asm/code-patching.h        |  34
-rw-r--r--  arch/powerpc/include/asm/head-64.h              |   2
-rw-r--r--  arch/powerpc/include/asm/hvcall.h               |  10
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h               |  23
-rw-r--r--  arch/powerpc/include/asm/inst.h                 |  94
-rw-r--r--  arch/powerpc/include/asm/interrupt.h            |  67
-rw-r--r--  arch/powerpc/include/asm/kup.h                  |  50
-rw-r--r--  arch/powerpc/include/asm/kvm_guest.h            |   4
-rw-r--r--  arch/powerpc/include/asm/livepatch.h            |   2
-rw-r--r--  arch/powerpc/include/asm/mmu.h                  |  19
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h          |   1
-rw-r--r--  arch/powerpc/include/asm/nohash/32/kup-8xx.h    |  46
-rw-r--r--  arch/powerpc/include/asm/nohash/32/mmu-44x.h    |   1
-rw-r--r--  arch/powerpc/include/asm/paca.h                 |   9
-rw-r--r--  arch/powerpc/include/asm/pgtable.h              |   5
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h           |  84
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h              |  15
-rw-r--r--  arch/powerpc/include/asm/probes.h               |   4
-rw-r--r--  arch/powerpc/include/asm/processor.h            |  21
-rw-r--r--  arch/powerpc/include/asm/ps3.h                  |   4
-rw-r--r--  arch/powerpc/include/asm/ptrace.h               |  50
-rw-r--r--  arch/powerpc/include/asm/reg.h                  |  13
-rw-r--r--  arch/powerpc/include/asm/security_features.h    |   4
-rw-r--r--  arch/powerpc/include/asm/set_memory.h           |  34
-rw-r--r--  arch/powerpc/include/asm/setup.h                |   1
-rw-r--r--  arch/powerpc/include/asm/sstep.h                |   7
-rw-r--r--  arch/powerpc/include/asm/uprobes.h              |   4
-rw-r--r--  arch/powerpc/include/asm/vas.h                  | 109
-rw-r--r--  arch/powerpc/include/asm/xics.h                 |   4
-rw-r--r--  arch/powerpc/include/uapi/asm/papr_pdsm.h       |   6
-rw-r--r--  arch/powerpc/include/uapi/asm/vas-api.h         |   6
40 files changed, 736 insertions, 335 deletions
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 02ee6f5ac9fe..222823861a67 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -71,8 +71,13 @@ void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
+unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
+unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
+#endif
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 7ae29cfb06c0..f0e687236484 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -46,6 +46,8 @@
# define SMPWMB eieio
#endif
+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
deleted file mode 100644
index 2a0a467d2985..000000000000
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
-#define _ASM_POWERPC_BOOK3S_32_HASH_H
-#ifdef __KERNEL__
-
-/*
- * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
- * table containing PTEs, together with a set of 16 segment registers,
- * to define the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much
- * like that used by the i386, for the sake of the Linux memory
- * management code. Low-level assembler code in hash_low_32.S
- * (procedure hash_page) is responsible for extracting ptes from the
- * tree and putting them into the hash table when necessary, and
- * updating the accessed and modified bits in the page table tree.
- */
-
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
-#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
-#define _PAGE_SPECIAL 0x800 /* software: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* We never clear the high word of the pte */
-#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
-#endif
-
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 1670dfe9d4f1..64201125a287 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -7,35 +7,104 @@
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
+
+extern struct static_key_false disable_kuap_key;
+extern struct static_key_false disable_kuep_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
+}
+
+static __always_inline bool kuep_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
+}
+
+static inline void kuep_lock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) | SR_NX);
+}
+
+static inline void kuep_unlock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) & ~SR_NX);
+}
+
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>
-static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
+#define KUAP_NONE (~0UL)
+#define KUAP_ALL (~1UL)
+
+static inline void kuap_lock_one(unsigned long addr)
{
- addr &= 0xf0000000; /* align addr to start of segment */
- barrier(); /* make sure thread.kuap is updated before playing with SRs */
- while (addr < end) {
- mtsr(sr, addr);
- sr += 0x111; /* next VSID */
- sr &= 0xf0ffffff; /* clear VSID overflow */
- addr += 0x10000000; /* address of next segment */
- }
+ mtsr(mfsr(addr) | SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_one(unsigned long addr)
+{
+ mtsr(mfsr(addr) & ~SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_lock_all(void)
+{
+ update_user_segments(mfsr(0) | SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_all(void)
+{
+ update_user_segments(mfsr(0) & ~SR_KS);
isync(); /* Context sync required after mtsr() */
}
+void kuap_lock_all_ool(void);
+void kuap_unlock_all_ool(void);
+
+static inline void kuap_lock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_lock_one(addr);
+ else if (!ool)
+ kuap_lock_all();
+ else
+ kuap_lock_all_ool();
+}
+
+static inline void kuap_unlock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_unlock_one(addr);
+ else if (!ool)
+ kuap_unlock_all();
+ else
+ kuap_unlock_all_ool();
+}
+
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
unsigned long kuap = current->thread.kuap;
- u32 addr = kuap & 0xf0000000;
- u32 end = kuap << 28;
+
+ if (kuap_is_disabled())
+ return;
regs->kuap = kuap;
- if (unlikely(!kuap))
+ if (unlikely(kuap == KUAP_NONE))
return;
- current->thread.kuap = 0;
- kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* Set Ks */
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, false);
}
static inline void kuap_user_restore(struct pt_regs *regs)
@@ -44,22 +113,22 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
- u32 addr = regs->kuap & 0xf0000000;
- u32 end = regs->kuap << 28;
+ if (kuap_is_disabled())
+ return;
current->thread.kuap = regs->kuap;
- if (unlikely(regs->kuap == kuap))
- return;
-
- kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ kuap_unlock(regs->kuap, false);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;
- WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
return kuap;
}
@@ -72,84 +141,78 @@ static inline void kuap_assert_locked(void)
static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
- u32 addr, end;
+ if (kuap_is_disabled())
+ return;
BUILD_BUG_ON(!__builtin_constant_p(dir));
- BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);
if (!(dir & KUAP_WRITE))
return;
- addr = (__force u32)to;
-
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
-
- end = min(addr + size, TASK_SIZE);
-
- current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
- kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ current->thread.kuap = (__force u32)to;
+ kuap_unlock_one((__force u32)to);
}
-static __always_inline void prevent_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static __always_inline void prevent_user_access(unsigned long dir)
{
- u32 addr, end;
-
- BUILD_BUG_ON(!__builtin_constant_p(dir));
+ u32 kuap = current->thread.kuap;
- if (dir & KUAP_CURRENT_WRITE) {
- u32 kuap = current->thread.kuap;
-
- if (unlikely(!kuap))
- return;
+ if (kuap_is_disabled())
+ return;
- addr = kuap & 0xf0000000;
- end = kuap << 28;
- } else if (dir & KUAP_WRITE) {
- addr = (__force u32)to;
- end = min(addr + size, TASK_SIZE);
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
- } else {
+ if (!(dir & KUAP_WRITE))
return;
- }
- current->thread.kuap = 0;
- kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* set Ks */
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, true);
}
static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
- if (flags)
- prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(flags, true);
+ }
return flags;
}
static inline void restore_user_access(unsigned long flags)
{
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
+ if (kuap_is_disabled())
+ return;
- if (flags)
- allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = flags;
+ kuap_unlock(flags, true);
+ }
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- unsigned long begin = regs->kuap & 0xf0000000;
- unsigned long end = regs->kuap << 28;
+ unsigned long kuap = regs->kuap;
+
+ if (kuap_is_disabled())
+ return false;
+
+ if (!is_write || kuap == KUAP_ALL)
+ return false;
+ if (kuap == KUAP_NONE)
+ return true;
+
+ /* If faulting address doesn't match unlocked segment, unlock all */
+ if ((kuap ^ address) & 0xf0000000)
+ regs->kuap = KUAP_ALL;
- return is_write && (address < begin || address >= end);
+ return false;
}
#endif /* CONFIG_PPC_KUAP */
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index b85f8e114a9c..f5be185cbdf8 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -67,6 +67,16 @@ struct ppc_bat {
#ifndef __ASSEMBLY__
/*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then hash functions will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
+
+/*
* Hardware Page Table Entry
* Note that the xpn and x bitfields are used only by processors that
* support extended addressing; otherwise, those bits are reserved.
@@ -102,6 +112,37 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
+#include <asm/reg.h>
+#include <asm/task_size_32.h>
+
+static __always_inline void update_user_segment(u32 n, u32 val)
+{
+ if (n << 28 < TASK_SIZE)
+ mtsr(val + n * 0x111, n << 28);
+}
+
+static __always_inline void update_user_segments(u32 val)
+{
+ val &= 0xf0ffffff;
+
+ update_user_segment(0, val);
+ update_user_segment(1, val);
+ update_user_segment(2, val);
+ update_user_segment(3, val);
+ update_user_segment(4, val);
+ update_user_segment(5, val);
+ update_user_segment(6, val);
+ update_user_segment(7, val);
+ update_user_segment(8, val);
+ update_user_segment(9, val);
+ update_user_segment(10, val);
+ update_user_segment(11, val);
+ update_user_segment(12, val);
+ update_user_segment(13, val);
+ update_user_segment(14, val);
+ update_user_segment(15, val);
+}
+
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 83c65845a1a9..609c80f67194 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -4,7 +4,43 @@
#include <asm-generic/pgtable-nopmd.h>
-#include <asm/book3s/32/hash.h>
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: exec allowed */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
/* And here we include common definitions */
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 9700da3a4093..a1cc73a88710 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -398,8 +398,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
#endif /* !CONFIG_PPC_KUAP */
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a666d561b44d..4d9941b2fe51 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -232,6 +232,9 @@ extern unsigned long __pmd_frag_size_shift;
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
+ H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
+
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index d5da7ddbf0fc..350de8f90250 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -91,7 +91,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
}
#define HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index f1d029bf906e..a95f63788c6b 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,13 +23,13 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
-int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
-int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
-int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
+int patch_branch(u32 *addr, unsigned long target, int flags);
+int patch_instruction(u32 *addr, struct ppc_inst instr);
+int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
static inline unsigned long patch_site_addr(s32 *site)
{
@@ -38,18 +38,18 @@ static inline unsigned long patch_site_addr(s32 *site)
static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
- return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
+ return patch_instruction((u32 *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
- return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
+ return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
unsigned int set)
{
- return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
+ return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
@@ -59,10 +59,8 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
-int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
-unsigned long branch_target(const struct ppc_inst *instr);
-int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
- const struct ppc_inst *src);
+unsigned long branch_target(const u32 *instr);
+int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
@@ -73,9 +71,9 @@ void __patch_exception(int exc, unsigned long addr);
#endif
#define OP_RT_RA_MASK 0xffff0000UL
-#define LIS_R2 (PPC_INST_ADDIS | __PPC_RT(R2))
-#define ADDIS_R2_R12 (PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
-#define ADDI_R2_R2 (PPC_INST_ADDI | __PPC_RT(R2) | __PPC_RA(R2))
+#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
+#define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
+#define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
static inline unsigned long ppc_function_entry(void *func)
@@ -180,12 +178,10 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
#define R2_STACK_OFFSET 40
#endif
-#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
- ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
/* usually preceded by a mflr r0 */
-#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
- ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 4cb9efa2eb21..242204e12993 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -16,7 +16,7 @@
.section ".head.data.\name\()","a",@progbits
.endm
.macro use_ftsec name
- .section ".head.text.\name\()"
+ .section ".head.text.\name\()","ax",@progbits
.endm
/*
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 7e4b2cef40c2..9bcf345cb208 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -294,6 +294,13 @@
#define H_RESIZE_HPT_COMMIT 0x370
#define H_REGISTER_PROC_TBL 0x37C
#define H_SIGNAL_SYS_RESET 0x380
+#define H_ALLOCATE_VAS_WINDOW 0x388
+#define H_MODIFY_VAS_WINDOW 0x38C
+#define H_DEALLOCATE_VAS_WINDOW 0x390
+#define H_QUERY_VAS_WINDOW 0x394
+#define H_QUERY_VAS_CAPABILITIES 0x398
+#define H_QUERY_NX_CAPABILITIES 0x39C
+#define H_GET_NX_FAULT 0x3A0
#define H_INT_GET_SOURCE_INFO 0x3A8
#define H_INT_SET_SOURCE_CONFIG 0x3AC
#define H_INT_GET_SOURCE_CONFIG 0x3B0
@@ -393,6 +400,9 @@
#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
+#define H_CPU_BEHAV_NO_STF_BARRIER (1ull << 54) // IBM bit 9
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 56a98936a6a9..21cc571ea9c2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -18,8 +18,17 @@
* PACA flags in paca->irq_happened.
*
* This bits are set when interrupts occur while soft-disabled
- * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
- * is set whenever we manually hard disable.
+ * and allow a proper replay.
+ *
+ * The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
+ * always in synch with the MSR[EE] state, except:
+ * - A window in interrupt entry, where hardware disables MSR[EE] and that
+ * must be "reconciled" with the soft mask state.
+ * - NMI interrupts that hit in awkward places, until they fix the state.
+ * - When local irqs are being enabled and state is being fixed up.
+ * - When returning from an interrupt there are some windows where this
+ * can become out of synch, but gets fixed before the RFI or before
+ * executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
*/
#define PACA_IRQ_HARD_DIS 0x01
#define PACA_IRQ_DBELL 0x02
@@ -389,7 +398,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static inline void may_hard_irq_enable(void) { }
+static inline bool may_hard_irq_enable(void)
+{
+ return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+ BUILD_BUG();
+}
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index 268d3bd073c8..b11c0e2f9639 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -8,17 +8,17 @@
#define ___get_user_instr(gu_op, dest, ptr) \
({ \
- long __gui_ret = 0; \
- unsigned long __gui_ptr = (unsigned long)ptr; \
+ long __gui_ret; \
+ u32 __user *__gui_ptr = (u32 __user *)ptr; \
struct ppc_inst __gui_inst; \
unsigned int __prefix, __suffix; \
- __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
+ \
+ __chk_user_ptr(ptr); \
+ __gui_ret = gu_op(__prefix, __gui_ptr); \
if (__gui_ret == 0) { \
if ((__prefix >> 26) == OP_PREFIX) { \
- __gui_ret = gu_op(__suffix, \
- (unsigned int __user *)__gui_ptr + 1); \
- __gui_inst = ppc_inst_prefix(__prefix, \
- __suffix); \
+ __gui_ret = gu_op(__suffix, __gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, __suffix); \
} else { \
__gui_inst = ppc_inst(__prefix); \
} \
@@ -29,14 +29,15 @@
})
#else /* !CONFIG_PPC64 */
#define ___get_user_instr(gu_op, dest, ptr) \
- gu_op((dest).val, (u32 __user *)(ptr))
+({ \
+ __chk_user_ptr(ptr); \
+ gu_op((dest).val, (u32 __user *)(ptr)); \
+})
#endif /* CONFIG_PPC64 */
-#define get_user_instr(x, ptr) \
- ___get_user_instr(get_user, x, ptr)
+#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
-#define __get_user_instr(x, ptr) \
- ___get_user_instr(__get_user, x, ptr)
+#define __get_user_instr(x, ptr) ___get_user_instr(__get_user, x, ptr)
/*
* Instruction data type for POWER
@@ -59,9 +60,9 @@ static inline int ppc_inst_primary_opcode(struct ppc_inst x)
return ppc_inst_val(x) >> 26;
}
-#ifdef CONFIG_PPC64
-#define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff })
+#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })
+#ifdef CONFIG_PPC64
#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
static inline u32 ppc_inst_suffix(struct ppc_inst x)
@@ -69,68 +70,43 @@ static inline u32 ppc_inst_suffix(struct ppc_inst x)
return x.suffix;
}
-static inline bool ppc_inst_prefixed(struct ppc_inst x)
-{
- return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff;
-}
+#else
+#define ppc_inst_prefix(x, y) ppc_inst(x)
-static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
+static inline u32 ppc_inst_suffix(struct ppc_inst x)
{
- return ppc_inst_prefix(swab32(ppc_inst_val(x)),
- swab32(ppc_inst_suffix(x)));
+ return 0;
}
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- u32 val, suffix;
-
- val = *(u32 *)ptr;
- if ((val >> 26) == OP_PREFIX) {
- suffix = *((u32 *)ptr + 1);
- return ppc_inst_prefix(val, suffix);
- } else {
- return ppc_inst(val);
- }
-}
+#endif /* CONFIG_PPC64 */
-static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
+static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
{
- return *(u64 *)&x == *(u64 *)&y;
+ if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
+ return ppc_inst_prefix(*ptr, *(ptr + 1));
+ else
+ return ppc_inst(*ptr);
}
-#else
-
-#define ppc_inst(x) ((struct ppc_inst){ .val = x })
-
-#define ppc_inst_prefix(x, y) ppc_inst(x)
-
static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
- return false;
-}
-
-static inline u32 ppc_inst_suffix(struct ppc_inst x)
-{
- return 0;
+ return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
}
static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
{
- return ppc_inst(swab32(ppc_inst_val(x)));
-}
-
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- return *ptr;
+ return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
}
static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
{
- return ppc_inst_val(x) == ppc_inst_val(y);
+ if (ppc_inst_val(x) != ppc_inst_val(y))
+ return false;
+ if (!ppc_inst_prefixed(x))
+ return true;
+ return ppc_inst_suffix(x) == ppc_inst_suffix(y);
}
-#endif /* CONFIG_PPC64 */
-
static inline int ppc_inst_len(struct ppc_inst x)
{
return ppc_inst_prefixed(x) ? 8 : 4;
@@ -140,13 +116,13 @@ static inline int ppc_inst_len(struct ppc_inst x)
* Return the address of the next instruction, if the instruction @value was
* located at @location.
*/
-static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *value)
+static inline u32 *ppc_inst_next(u32 *location, u32 *value)
{
struct ppc_inst tmp;
tmp = ppc_inst_read(value);
- return location + ppc_inst_len(tmp);
+ return (void *)location + ppc_inst_len(tmp);
}
static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
@@ -178,6 +154,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
-int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src);
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src);
#endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 59f704408d65..d4bdf7d274ac 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -73,13 +73,47 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
+#ifdef CONFIG_PPC_BOOK3S_64
+extern char __end_soft_masked[];
+bool search_kernel_soft_mask_table(unsigned long addr);
+unsigned long search_kernel_restart_table(unsigned long addr);
+
+DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_PR)
+ return false;
+
+ if (regs->nip >= (unsigned long)__end_soft_masked)
+ return false;
+
+ return search_kernel_soft_mask_table(regs->nip);
+}
+
+static inline void srr_regs_clobbered(void)
+{
+ local_paca->srr_valid = 0;
+ local_paca->hsrr_valid = 0;
+}
+#else
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
/* Can avoid a test-and-clear because NMIs do not call this */
clear_thread_local_flags(_TLF_NAPPING);
- regs->nip = (unsigned long)power4_idle_nap_return;
+ regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
}
#endif
}
@@ -129,9 +163,18 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != INTERRUPT_PROGRAM)
+ if (TRAP(regs) != INTERRUPT_PROGRAM) {
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+ BUG_ON(is_implicit_soft_masked(regs));
+ }
+#ifdef CONFIG_PPC_BOOK3S
+ /* Move this under a debugging check */
+ if (arch_irq_disabled_regs(regs))
+ BUG_ON(search_kernel_restart_table(regs->nip));
+#endif
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif
booke_restore_dbcr0();
@@ -186,6 +229,7 @@ struct interrupt_nmi_state {
u8 irq_soft_mask;
u8 irq_happened;
u8 ftrace_enabled;
+ u64 softe;
#endif
};
@@ -211,6 +255,7 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
#ifdef CONFIG_PPC64
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;
+ state->softe = regs->softe;
/*
* Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
@@ -220,12 +265,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
- regs->nip < (unsigned long)__end_interrupts) {
- // Kernel code running below __end_interrupts is
- // implicitly soft-masked.
+ if (is_implicit_soft_masked(regs)) {
+ // Adjust regs->softe soft implicit soft-mask, so
+ // arch_irq_disabled_regs(regs) behaves as expected.
regs->softe = IRQS_ALL_DISABLED;
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
/* Don't do any per-CPU operations until interrupt state is fixed */
@@ -258,11 +304,20 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
*/
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
+ if (arch_irq_disabled_regs(regs)) {
+ unsigned long rst = search_kernel_restart_table(regs->nip);
+ if (rst)
+ regs_set_return_ip(regs, rst);
+ }
+#endif
+
if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);
/* Check we didn't change the pending interrupt mask. */
WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+ regs->softe = state->softe;
local_paca->irq_happened = state->irq_happened;
local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index ec96232529ac..1df763002726 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -5,14 +5,6 @@
#define KUAP_READ 1
#define KUAP_WRITE 2
#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
-/*
- * For prevent_user_access() only.
- * Use the current saved situation instead of the to/from/size params.
- * Used on book3s/32
- */
-#define KUAP_CURRENT_READ 4
-#define KUAP_CURRENT_WRITE 8
-#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup.h>
@@ -46,10 +38,7 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
-#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
-void kuep_lock(void);
-void kuep_unlock(void);
-#else
+#ifndef CONFIG_PPC_BOOK3S_32
static inline void kuep_lock(void) { }
static inline void kuep_unlock(void) { }
#endif
@@ -83,8 +72,7 @@ static inline unsigned long kuap_get_and_assert_locked(void)
#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
+static inline void prevent_user_access(unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -96,53 +84,53 @@ static __always_inline void setup_kup(void)
setup_kuap(disable_kuap);
}
-static inline void allow_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
barrier_nospec();
allow_user_access(NULL, from, size, KUAP_READ);
}
-static inline void allow_write_to_user(void __user *to, unsigned long size)
+static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
{
allow_user_access(to, NULL, size, KUAP_WRITE);
}
-static inline void allow_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
barrier_nospec();
allow_user_access(to, from, size, KUAP_READ_WRITE);
}
-static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
{
- prevent_user_access(NULL, from, size, KUAP_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_write_to_user(void __user *to, unsigned long size)
+static __always_inline void prevent_write_to_user(void __user *to, unsigned long size)
{
- prevent_user_access(to, NULL, size, KUAP_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
-static inline void prevent_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void prevent_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
- prevent_user_access(to, from, size, KUAP_READ_WRITE);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_access_user(void)
+static __always_inline void prevent_current_access_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_read_from_user(void)
+static __always_inline void prevent_current_read_from_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_current_write_to_user(void)
+static __always_inline void prevent_current_write_to_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
index 2fca299f7e19..c63105d2c9e7 100644
--- a/arch/powerpc/include/asm/kvm_guest.h
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
return static_branch_unlikely(&kvm_guest);
}
-bool check_kvm_guest(void);
+int check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
-static inline bool check_kvm_guest(void) { return false; }
+static inline int check_kvm_guest(void) { return 0; }
#endif
#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index ae25e6e72997..4fe018cc207b 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
struct pt_regs *regs = ftrace_get_regs(fregs);
- regs->nip = ip;
+ regs_set_return_ip(regs, ip);
}
#define klp_get_ftrace_location klp_get_ftrace_location
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 607168b1aef4..27016b98ecb2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -220,7 +220,7 @@ enum {
#elif defined(CONFIG_44x)
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
#endif
-#if defined(CONFIG_E200) || defined(CONFIG_E500)
+#ifdef CONFIG_E500
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
@@ -324,7 +324,6 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
}
#endif /* !CONFIG_DEBUG_VM */
-#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
@@ -334,17 +333,6 @@ static inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-#else
-static inline bool radix_enabled(void)
-{
- return false;
-}
-
-static inline bool early_radix_enabled(void)
-{
- return false;
-}
-#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
@@ -357,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
return false;
}
#endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+ return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index db186c539d37..9ba6b585337f 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -57,7 +57,6 @@ static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
-extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 295ef5639609..882a0bc7887a 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -9,10 +9,22 @@
#ifndef __ASSEMBLY__
+#include <linux/jump_label.h>
+
#include <asm/reg.h>
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
+ if (kuap_is_disabled())
+ return;
+
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
@@ -23,12 +35,20 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, regs->kuap);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
- unsigned long kuap = mfspr(SPRN_MD_AP);
+ unsigned long kuap;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ kuap = mfspr(SPRN_MD_AP);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
@@ -38,25 +58,35 @@ static inline unsigned long kuap_get_and_assert_locked(void)
static inline void kuap_assert_locked(void)
{
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && !kuap_is_disabled())
kuap_get_and_assert_locked();
}
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
static inline unsigned long prevent_user_access_return(void)
{
- unsigned long flags = mfspr(SPRN_MD_AP);
+ unsigned long flags;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
@@ -65,12 +95,18 @@ static inline unsigned long prevent_user_access_return(void)
static inline void restore_user_access(unsigned long flags)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, flags);
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
+ if (kuap_is_disabled())
+ return false;
+
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 2d92a39d8f2e..43ceca128531 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -113,6 +113,7 @@ typedef struct {
/* patch sites */
extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
+extern s32 patch__tlb_44x_kuep, patch__tlb_47x_kuep;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ec18ac818e3a..dc05a862e72a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -149,11 +149,9 @@ struct paca_struct {
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S
- mm_context_id_t mm_ctx_id;
#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long mm_ctx_slb_addr_limit;
#else
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
@@ -167,9 +165,16 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
+#ifdef CONFIG_PPC64
+ u64 exit_save_r1; /* Syscall/interrupt R1 save */
+#endif
#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 hsrr_valid; /* HSRRs set for HRFID */
+ u8 srr_valid; /* SRRs set for RFID */
+#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 5969743719bc..d564d0ecd4cd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,6 +41,10 @@ struct mm_struct;
#ifndef __ASSEMBLY__
+#ifndef MAX_PTRS_PER_PGD
+#define MAX_PTRS_PER_PGD PTRS_PER_PGD
+#endif
+
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
@@ -72,6 +76,7 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+void poking_init(void);
extern unsigned long ioremap_bot;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ac41776661e9..bede76dd3db7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -76,6 +76,40 @@
#define __REGA0_R30 30
#define __REGA0_R31 31
+/* For use with PPC_RAW_() macros */
+#define _R0 0
+#define _R1 1
+#define _R2 2
+#define _R3 3
+#define _R4 4
+#define _R5 5
+#define _R6 6
+#define _R7 7
+#define _R8 8
+#define _R9 9
+#define _R10 10
+#define _R11 11
+#define _R12 12
+#define _R13 13
+#define _R14 14
+#define _R15 15
+#define _R16 16
+#define _R17 17
+#define _R18 18
+#define _R19 19
+#define _R20 20
+#define _R21 21
+#define _R22 22
+#define _R23 23
+#define _R24 24
+#define _R25 25
+#define _R26 26
+#define _R27 27
+#define _R28 28
+#define _R29 29
+#define _R30 30
+#define _R31 31
+
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
@@ -222,13 +256,11 @@
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
-#define PPC_INST_ISYNC 0x4c00012c
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MTMSRD 0x7c000164
-#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_RFEBB 0x4c000124
@@ -241,10 +273,10 @@
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
-#define PPC_INST_SC 0x44000002
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
+#define PPC_INST_SETB 0x7c000100
#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_TRECHKPT 0x7c0007dd
@@ -252,18 +284,9 @@
#define PPC_INST_TSR 0x7c0005dd
#define PPC_INST_LD 0xe8000000
#define PPC_INST_STD 0xf8000000
-#define PPC_INST_MFLR 0x7c0802a6
-#define PPC_INST_MTCTR 0x7c0903a6
-#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
-#define PPC_INST_BLR 0x4e800020
-#define PPC_INST_BCTR 0x4e800420
-#define PPC_INST_BCTRL 0x4e800421
#define PPC_INST_DIVD 0x7c0003d2
-#define PPC_INST_RLDICR 0x78000004
-#define PPC_INST_ORI 0x60000000
-#define PPC_INST_ORIS 0x64000000
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BL 0x48000001
#define PPC_INST_BRANCH_COND 0x40800000
@@ -323,6 +346,8 @@
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)
+#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
+#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -383,6 +408,10 @@
#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
+#define PPC_RAW_SC() (0x44000002)
+#define PPC_RAW_SYNC() (0x7c0004ac)
+#define PPC_RAW_ISYNC() (0x4c00012c)
+
/*
* Define what the VSX XX1 form instructions will look like, then add
* the 128 bit load store instructions based on that.
@@ -404,10 +433,10 @@
#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_RAW_PLXVP(xtp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i)))
-#define PPC_RAW_PSTXVP(xsp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i)))
+#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
#define PPC_RAW_NAP (0x4c000364)
#define PPC_RAW_SLEEP (0x4c0003a4)
#define PPC_RAW_WINKLE (0x4c0003e4)
@@ -445,16 +474,17 @@
#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
-#define PPC_RAW_NOP() (PPC_INST_NOP)
-#define PPC_RAW_BLR() (PPC_INST_BLR)
+#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
+#define PPC_RAW_BLR() (0x4e800020)
#define PPC_RAW_BLRL() (0x4e800021)
#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
-#define PPC_RAW_MFLR(t) (PPC_INST_MFLR | ___PPC_RT(t))
-#define PPC_RAW_BCTR() (PPC_INST_BCTR)
-#define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r))
-#define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
+#define PPC_RAW_BCTR() (0x4e800420)
+#define PPC_RAW_BCTRL() (0x4e800421)
+#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
+#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
-#define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
@@ -499,8 +529,8 @@
#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
-#define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
-#define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
@@ -519,7 +549,7 @@
(0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
-#define PPC_RAW_RLDICR(d, a, i, me) (PPC_INST_RLDICR | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
+#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
@@ -533,6 +563,8 @@
#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
+#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
+#define PPC_RAW_EIEIO() (0x7c0006ac)
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index d6739d700f0a..116c1519728a 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -762,6 +762,21 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.previous)
+
+#define RESTART_TABLE(_start, _end, _target) \
+ stringify_in_c(.section __restart_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.llong (_target);) \
+ stringify_in_c(.previous)
+
#ifdef CONFIG_PPC_FSL_BOOK3E
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index 84dd1addd434..c5d984700d24 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
- regs->msr |= MSR_SINGLESTEP;
+ regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* We turn off Critical Input Exception(CE) to ensure that the single
* step will be for the instruction we have the probe on; if we don't,
* it is possible we'd get the single step reported for CE.
*/
- regs->msr &= ~MSR_CE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_CE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 7bf8a15af224..f348e564f7dd 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -276,7 +276,15 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .pgdir = swapper_pg_dir, \
+ .kuap = ~0UL, /* KUAP_NONE */ \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
+}
+#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
.ksp = INIT_SP, \
.pgdir = swapper_pg_dir, \
@@ -339,17 +347,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define spin_end() HMT_medium()
-#define spin_until_cond(cond) \
-do { \
- if (unlikely(!(cond))) { \
- spin_begin(); \
- do { \
- spin_cpu_relax(); \
- } while (!(cond)); \
- spin_end(); \
- } \
-} while (0)
-
#endif
/* Check that a certain kernel stack pointer is valid in task_struct p */
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index e646c7f218bc..8a0d8fb35328 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -71,6 +71,7 @@ struct ps3_dma_region_ops;
* @bus_addr: The 'translated' bus address of the region.
* @len: The length in bytes of the region.
* @offset: The offset from the start of memory of the region.
+ * @dma_mask: Device dma_mask.
 * @ioid: The IOID of the device that owns this region
* @chunk_list: Opaque variable used by the ioc page manager.
* @region_ops: struct ps3_dma_region_ops - dma region operations
@@ -85,6 +86,7 @@ struct ps3_dma_region {
enum ps3_dma_region_type region_type;
unsigned long len;
unsigned long offset;
+ u64 dma_mask;
/* driver variables (set by ps3_dma_region_create) */
unsigned long bus_addr;
@@ -232,7 +234,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) || defined(CONFIG_PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index b476a685f066..3e5d470a6155 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -48,11 +48,12 @@ struct pt_regs
unsigned long result;
};
};
-
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
union {
struct {
#ifdef CONFIG_PPC64
unsigned long ppr;
+ unsigned long exit_result;
#endif
union {
#ifdef CONFIG_PPC_KUAP
@@ -68,6 +69,7 @@ struct pt_regs
};
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
};
+#endif
};
#endif
@@ -122,6 +124,41 @@ struct pt_regs
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
+#include <asm/paca.h>
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void set_return_regs_changed(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
+{
+ regs->nip = ip;
+ set_return_regs_changed();
+}
+
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
+{
+ regs->msr = msr;
+ set_return_regs_changed();
+}
+
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
+{
+ regs_set_return_ip(regs, regs->nip + offset);
+}
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
@@ -131,7 +168,7 @@ static inline unsigned long instruction_pointer(struct pt_regs *regs)
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- regs->nip = val;
+ regs_set_return_ip(regs, val);
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
@@ -144,15 +181,6 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
return 0;
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
-
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
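Editor's note: the point of regs_set_return_ip()/regs_set_return_msr() is that any change to the return state also clears the paca's srr_valid/hsrr_valid flags on Book3S-64, so the interrupt exit path knows it must reload SRR0/SRR1. Callers should go through the accessors rather than writing regs->nip or regs->msr directly; a minimal sketch of the conversion pattern (the function name is illustrative):

/* Before: direct assignment would leave stale srr_valid/hsrr_valid flags.
 *	regs->nip = new_ip;
 *	regs->msr |= MSR_SE;
 *
 * After: the accessors update the register and invalidate the cached
 * SRR/HSRR state in one step.
 */
static void fixup_return_state(struct pt_regs *regs, unsigned long new_ip)
{
	regs_set_return_ip(regs, new_ip);
	regs_set_return_msr(regs, regs->msr | MSR_SE);
}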
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7c81d3e563b2..be85cf156a1f 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -393,6 +393,7 @@
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
+#define SPRN_TRIG2 0x372
#define SPRN_PMCR 0x374 /* Power Management Control Register */
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
@@ -1435,8 +1436,6 @@ static inline void mtsr(u32 val, u32 idx)
}
#endif
-#define proc_trap() asm volatile("trap")
-
extern unsigned long current_stack_frame(void);
register unsigned long current_stack_pointer asm("r1");
@@ -1447,16 +1446,6 @@ extern void scom970_write(unsigned int address, unsigned long value);
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
-
-static inline void update_power8_hid0(unsigned long hid0)
-{
- /*
- * The HID0 update on Power8 should at the very least be
- * preceded by a SYNC instruction followed by an ISYNC
- * instruction
- */
- asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
-}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index b774a4477d5f..792eefaf230b 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -92,6 +92,9 @@ static inline bool security_ftr_enabled(u64 feature)
// The L1-D cache should be flushed after user accesses from the kernel
#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
+// The STF flush should be executed on privilege state switch
+#define SEC_FTR_STF_BARRIER 0x0000000000010000ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
@@ -99,6 +102,7 @@ static inline bool security_ftr_enabled(u64 feature)
SEC_FTR_BNDS_CHK_SPEC_BAR | \
SEC_FTR_L1D_FLUSH_ENTRY | \
SEC_FTR_L1D_FLUSH_UACCESS | \
+ SEC_FTR_STF_BARRIER | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..b040094f7920
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO 0
+#define SET_MEMORY_RW 1
+#define SET_MEMORY_NX 2
+#define SET_MEMORY_X 3
+
+int change_memory_attr(unsigned long addr, int numpages, long action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
+
+#endif
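Editor's note: set_memory_ro()/rw()/nx()/x() are thin wrappers that pass a SET_MEMORY_* action to change_memory_attr(); addr is a page-aligned kernel virtual address and numpages a page count. A hedged usage sketch (the buffer and call site are illustrative only):

#include <linux/vmalloc.h>
#include <asm/set_memory.h>

/* Illustrative only: allocate one page and make it read-only. */
static int protect_buffer(void)
{
	void *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return -ENOMEM;
	/* one page, starting at a page-aligned vmalloc address */
	return set_memory_ro((unsigned long)buf, 1);
}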
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e89bfebd4e00..6c1a7d217d1a 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -10,7 +10,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern bool init_mem_is_free;
-extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 972ed0df154d..1df867c2e054 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -13,12 +13,11 @@ struct pt_regs;
* we don't allow putting a breakpoint on an mtmsrd instruction.
* Similarly we don't allow breakpoints on rfid instructions.
* These macros tell us if an instruction is a mtmsrd or rfid.
- * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
- * and an mtmsrd (64-bit).
+ * Note that these return true for both mtmsr/rfi (32-bit)
+ * and mtmsrd/rfid (64-bit).
*/
#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
-#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
-#define IS_RFI(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
+#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
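Editor's note on the sstep.h change: rfi (0x4c000064) and rfid (0x4c000024) differ only in bit 0x40, as do mtmsr (0x7c000124) and mtmsrd (0x7c000164). The relaxed mask 0xfc0007be clears that bit, so a single comparison now covers both the 32-bit and 64-bit forms and the separate IS_RFI() test can be dropped. A quick compile-time check of the arithmetic (illustrative, not part of the patch):

#include <linux/build_bug.h>

static inline void check_rfid_mtmsrd_masks(void)
{
	BUILD_BUG_ON((0x4c000064u & 0xfc0007beu) != 0x4c000024u);	/* rfi    */
	BUILD_BUG_ON((0x4c000024u & 0xfc0007beu) != 0x4c000024u);	/* rfid   */
	BUILD_BUG_ON((0x7c000124u & 0xfc0007beu) != 0x7c000124u);	/* mtmsr  */
	BUILD_BUG_ON((0x7c000164u & 0xfc0007beu) != 0x7c000124u);	/* mtmsrd */
}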
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 5bf65f5d44a9..fe683371336f 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -24,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- struct ppc_inst insn;
- struct ppc_inst ixol;
+ u32 insn[2];
+ u32 ixol[2];
};
};
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index e33f80b0ea81..57573d9c1e09 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -5,8 +5,10 @@
#ifndef _ASM_POWERPC_VAS_H
#define _ASM_POWERPC_VAS_H
-
-struct vas_window;
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+#include <uapi/asm/vas-api.h>
/*
* Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
@@ -49,6 +51,64 @@ enum vas_cop_type {
};
/*
+ * User space VAS windows are opened by tasks and take references
+ * to the pid and mm until the windows are closed.
+ * Each window stores the owner's pid, tgid, and mm.
+ */
+struct vas_user_win_ref {
+ struct pid *pid; /* PID of owner */
+ struct pid *tgid; /* Thread group ID of owner */
+ struct mm_struct *mm; /* Linux process mm_struct */
+};
+
+/*
+ * Common VAS window struct on PowerNV and PowerVM
+ */
+struct vas_window {
+ u32 winid;
+ u32 wcreds_max; /* Window credits */
+ enum vas_cop_type cop;
+ struct vas_user_win_ref task_ref;
+ char *dbgname;
+ struct dentry *dbgdir;
+};
+
+/*
+ * User space window operations used on PowerNV and PowerVM
+ */
+struct vas_user_win_ops {
+ struct vas_window * (*open_win)(int vas_id, u64 flags,
+ enum vas_cop_type);
+ u64 (*paste_addr)(struct vas_window *);
+ int (*close_win)(struct vas_window *);
+};
+
+static inline void put_vas_user_win_ref(struct vas_user_win_ref *ref)
+{
+ /* Drop references to pid, tgid, and mm */
+ put_pid(ref->pid);
+ put_pid(ref->tgid);
+ if (ref->mm)
+ mmdrop(ref->mm);
+}
+
+static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
+{
+ mm_context_add_vas_window(ref->mm);
+ /*
+ * Even a process that has no foreign real address mapping can
+ * use an unpaired COPY instruction (to no real effect). Issue
+ * CP_ABORT to clear any pending COPY and prevent a covert
+ * channel.
+ *
+ * __switch_to() will issue CP_ABORT on future context switches
+ * if process / thread has any open VAS window (Use
+ * current->mm->context.vas_windows).
+ */
+ asm volatile(PPC_CP_ABORT);
+}
+
+/*
* Receive window attributes specified by the (in-kernel) owner of window.
*/
struct vas_rx_win_attr {
@@ -100,6 +160,7 @@ struct vas_tx_win_attr {
bool rx_win_ord_mode;
};
+#ifdef CONFIG_PPC_POWERNV
/*
* Helper to map a chip id to VAS id.
 * For POWER9, this is a 1:1 mapping. In the future this may be a 1:N
@@ -162,6 +223,43 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name);
+void vas_unregister_api_powernv(void);
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+
+/* VAS Capabilities */
+#define VAS_GZIP_QOS_FEAT 0x1
+#define VAS_GZIP_DEF_FEAT 0x2
+#define VAS_GZIP_QOS_FEAT_BIT PPC_BIT(VAS_GZIP_QOS_FEAT) /* Bit 1 */
+#define VAS_GZIP_DEF_FEAT_BIT PPC_BIT(VAS_GZIP_DEF_FEAT) /* Bit 2 */
+
+/* NX Capabilities */
+#define VAS_NX_GZIP_FEAT 0x1
+#define VAS_NX_GZIP_FEAT_BIT PPC_BIT(VAS_NX_GZIP_FEAT) /* Bit 1 */
+
+/*
+ * These structs are used to retrieve overall VAS capabilities that
+ * the hypervisor provides.
+ */
+struct hv_vas_all_caps {
+ __be64 descriptor;
+ __be64 feat_type;
+} __packed __aligned(0x1000);
+
+struct vas_all_caps {
+ u64 descriptor;
+ u64 feat_type;
+};
+
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result);
+int vas_register_api_pseries(struct module *mod,
+ enum vas_cop_type cop_type, const char *name);
+void vas_unregister_api_pseries(void);
+#endif
+
/*
* Register / unregister coprocessor type to VAS API which will be exported
* to user space. Applications can use this API to open / close window
@@ -171,7 +269,12 @@ int vas_paste_crb(struct vas_window *win, int offset, bool re);
* used for others in future.
*/
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- const char *name);
+ const char *name,
+ const struct vas_user_win_ops *vops);
void vas_unregister_coproc_api(void);
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref);
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref);
+void vas_dump_crb(struct coprocessor_request_block *crb);
#endif /* __ASM_POWERPC_VAS_H */
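Editor's note: vas_register_coproc_api() now takes a vas_user_win_ops table, so each platform backend (PowerNV or pseries) supplies its own open-window, paste-address and close-window callbacks. A hedged sketch of a registration; the callback bodies and names below are placeholders, not code from this patch:

#include <linux/err.h>
#include <asm/vas.h>

static struct vas_window *example_open_win(int vas_id, u64 flags,
					   enum vas_cop_type cop_type)
{
	return ERR_PTR(-ENODEV);	/* placeholder */
}

static u64 example_paste_addr(struct vas_window *win)
{
	return 0;			/* placeholder */
}

static int example_close_win(struct vas_window *win)
{
	return 0;			/* placeholder */
}

static const struct vas_user_win_ops example_vops = {
	.open_win	= example_open_win,
	.paste_addr	= example_paste_addr,
	.close_win	= example_close_win,
};

/* e.g.: vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
 *                               "nx-gzip", &example_vops); */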
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 8e903b3f9c24..d9cf192368ad 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -65,8 +65,12 @@ struct icp_ops {
extern const struct icp_ops *icp_ops;
+#ifdef CONFIG_PPC_ICS_NATIVE
/* Native ICS */
extern int ics_native_init(void);
+#else
+static inline int ics_native_init(void) { return -ENODEV; }
+#endif
/* RTAS ICS */
#ifdef CONFIG_PPC_ICS_RTAS
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
index 50ef95e2f5b1..82488b1e7276 100644
--- a/arch/powerpc/include/uapi/asm/papr_pdsm.h
+++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h
@@ -77,6 +77,9 @@
/* Indicate that the 'dimm_fuel_gauge' field is valid */
#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1
+/* Indicate that the 'dimm_dsc' field is valid */
+#define PDSM_DIMM_DSC_VALID 2
+
/*
* Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH
* Various flags indicate the health status of the dimm.
@@ -105,6 +108,9 @@ struct nd_papr_pdsm_health {
/* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */
__u16 dimm_fuel_gauge;
+
+ /* Extension flag PDSM_DIMM_DSC_VALID */
+ __u64 dimm_dsc;
};
__u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
};
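Editor's note: dimm_dsc is only meaningful when PDSM_DIMM_DSC_VALID is set in the payload's extension flags, mirroring the existing fuel-gauge flag. A hedged consumer-side sketch (e.g. in ndctl); the extension_flags member name is assumed from the fuel-gauge handling rather than shown in this hunk:

#include <stdio.h>
#include <asm/papr_pdsm.h>	/* installed uapi header shown above */

static void show_dimm_dsc(const struct nd_papr_pdsm_health *health)
{
	/* only read dimm_dsc when the corresponding extension flag is set */
	if (health->extension_flags & PDSM_DIMM_DSC_VALID)
		printf("DIMM service count: %llu\n",
		       (unsigned long long)health->dimm_dsc);
}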
diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
index ebd4b2424785..7c81301ecdba 100644
--- a/arch/powerpc/include/uapi/asm/vas-api.h
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -13,11 +13,15 @@
#define VAS_MAGIC 'v'
#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
+/* Flags to the VAS TX window open ioctl */
+/* Allocate the window with a QoS credit; otherwise the default credit is used */
+#define VAS_TX_WIN_FLAG_QOS_CREDIT 0x0000000000000001
+
struct vas_tx_win_open_attr {
__u32 version;
__s16 vas_id; /* specific instance of vas or -1 for default */
__u16 reserved1;
- __u64 flags; /* Future use */
+ __u64 flags;
__u64 reserved2[6];
};
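Editor's note: VAS_TX_WIN_FLAG_QOS_CREDIT is the first real user of the flags word: user space sets it in vas_tx_win_open_attr to request a window backed by a QoS credit instead of a default credit. A hedged user-space sketch; the device node path and version value are the conventional NX-GZIP ones and are not defined by this header:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/vas-api.h>

/* Illustrative only: open a TX window with a QoS credit. */
static int open_qos_window(int vas_id)
{
	struct vas_tx_win_open_attr attr;
	int fd = open("/dev/crypto/nx-gzip", O_RDWR);	/* conventional path, assumed */

	if (fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.version = 1;	/* current API version, assumed */
	attr.vas_id = vas_id;	/* or -1 for the default instance */
	attr.flags = VAS_TX_WIN_FLAG_QOS_CREDIT;

	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}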