Diffstat (limited to 'arch/powerpc/mm/fault.c')
-rw-r--r--  arch/powerpc/mm/fault.c  92
1 file changed, 61 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 84af6c8eecf7..2393ed9d84bb 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -41,18 +41,19 @@
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
+#include <asm/inst.h>
/*
* Check whether the instruction inst is a store using
* an update addressing form which will update r1.
*/
-static bool store_updates_sp(unsigned int inst)
+static bool store_updates_sp(struct ppc_inst inst)
{
/* check for 1 in the rA field */
- if (((inst >> 16) & 0x1f) != 1)
+ if (((ppc_inst_val(inst) >> 16) & 0x1f) != 1)
return false;
/* check major opcode */
- switch (inst >> 26) {
+ switch (ppc_inst_primary_opcode(inst)) {
case OP_STWU:
case OP_STBU:
case OP_STHU:
@@ -60,10 +61,10 @@ static bool store_updates_sp(unsigned int inst)
case OP_STFDU:
return true;
case OP_STD: /* std or stdu */
- return (inst & 3) == 1;
+ return (ppc_inst_val(inst) & 3) == 1;
case OP_31:
/* check minor opcode */
- switch ((inst >> 1) & 0x3ff) {
+ switch ((ppc_inst_val(inst) >> 1) & 0x3ff) {
case OP_31_XOP_STDUX:
case OP_31_XOP_STWUX:
case OP_31_XOP_STBUX:
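For reference, store_updates_sp() now takes the instruction wrapper type introduced by <asm/inst.h>. A minimal sketch of the accessors used above, assuming the simple case where struct ppc_inst wraps a plain u32 (the real header also has to cope with prefixed instructions):

	struct ppc_inst {
		u32 val;
	};

	static inline u32 ppc_inst_val(struct ppc_inst x)
	{
		return x.val;
	}

	static inline int ppc_inst_primary_opcode(struct ppc_inst x)
	{
		/* the major opcode sits in the top 6 bits of the word */
		return ppc_inst_val(x) >> 26;
	}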
@@ -118,9 +119,34 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
return __bad_area(regs, address, SEGV_MAPERR);
}
-static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
- int pkey)
+#ifdef CONFIG_PPC_MEM_KEYS
+static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
+ struct vm_area_struct *vma)
{
+ struct mm_struct *mm = current->mm;
+ int pkey;
+
+ /*
+ * We don't try to fetch the pkey from the page table because reading
+ * the page table without locking doesn't guarantee a stable pte value.
+ * Hence the pkey value that we return to userspace can be different
+ * from the pkey that actually caused the access error.
+ *
+ * It does *not* guarantee that the VMA we find here
+ * was the one that we faulted on.
+ *
+ * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
+ * 2. T1 : set AMR to deny access to pkey=4, touches page
+ * 3. T1 : faults...
+ * 4. T2 : mprotect_key(foo, PAGE_SIZE, pkey=5);
+ * 5. T1 : enters fault handler, takes mmap_sem, etc...
+ * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
+ * faulted on a pte with its pkey=4.
+ */
+ pkey = vma_pkey(vma);
+
+ up_read(&mm->mmap_sem);
+
/*
* If we are in kernel mode, bail out with a SEGV, this will
* be caught by the assembly which will restore the non-volatile
@@ -133,6 +159,7 @@ static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
return 0;
}
+#endif
static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
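The stale-pkey race described in the comment above is easiest to see from userspace. Below is a hypothetical demo using glibc's pkey wrappers (not part of this patch) that takes exactly the kind of key fault bad_access_pkey() reports; a second thread calling pkey_mprotect() with a different key between the faulting store and the handler's vma_pkey() read plays the part of T2, and the si_pkey delivered here may then name the new key:

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		int pkey = pkey_alloc(0, 0);

		if (p == MAP_FAILED || pkey < 0) {
			perror("setup");
			return 1;
		}

		/* tag the page with the key, then revoke access through
		 * the per-thread key register (the AMR on powerpc) */
		pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
		pkey_set(pkey, PKEY_DISABLE_ACCESS);

		p[0] = 1;	/* faults; delivered as SEGV_PKUERR */
		return 0;
	}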
@@ -255,7 +282,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
- unsigned int __user *nip = (unsigned int __user *)regs->nip;
+ struct ppc_inst __user *nip = (struct ppc_inst __user *)regs->nip;
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
@@ -278,9 +305,9 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
access_ok(nip, sizeof(*nip))) {
- unsigned int inst;
+ struct ppc_inst inst;
- if (!probe_user_read(&inst, nip, sizeof(inst)))
+ if (!probe_user_read_inst(&inst, nip))
return !store_updates_sp(inst);
*must_retry = true;
}
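probe_user_read_inst() comes from the same instruction-wrapper series; here is a sketch of its non-prefixed path, on the assumption that it is a thin wrapper over probe_user_read(), with ppc_inst() as the constructor macro from <asm/inst.h>:

	int probe_user_read_inst(struct ppc_inst *inst,
				 struct ppc_inst __user *nip)
	{
		unsigned int val;
		int err;

		err = probe_user_read(&val, nip, sizeof(val));
		*inst = ppc_inst(val);

		return err;
	}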
@@ -289,8 +316,23 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
return false;
}
-static bool access_error(bool is_write, bool is_exec,
- struct vm_area_struct *vma)
+#ifdef CONFIG_PPC_MEM_KEYS
+static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
+ struct vm_area_struct *vma)
+{
+ /*
+ * Make sure to check the VMA so that we do not perform
+ * faults just to hit a pkey fault as soon as we fill in a
+ * page. Only called for the current mm, hence foreign == 0.
+ */
+ if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
+ return true;
+
+ return false;
+}
+#endif
+
+static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
/*
* Allow execution from readable areas if the MMU does not
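access_pkey_error() defers the real decision to arch_vma_access_permitted(). A condensed, hypothetical sketch of what the powerpc hook checks (the actual helper also returns true early when the MMU lacks pkey support; pkey_access_permitted() consults the AMR/IAMR state):

	static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
						     bool write, bool execute,
						     bool foreign)
	{
		/* key permissions are not enforced on a foreign mm */
		if (foreign || vma_is_foreign(vma))
			return true;

		return pkey_access_permitted(vma_pkey(vma), write, execute);
	}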
@@ -483,10 +525,6 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (error_code & DSISR_KEYFAULT)
- return bad_key_fault_exception(regs, address,
- get_mm_addr_key(mm, address));
-
/*
* We want to do this outside mmap_sem, because reading code around nip
* can result in fault, which will cause a deadlock when called with
@@ -555,6 +593,13 @@ retry:
return bad_area(regs, address);
good_area:
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ if (unlikely(access_pkey_error(is_write, is_exec,
+ (error_code & DSISR_KEYFAULT), vma)))
+ return bad_access_pkey(regs, address, vma);
+#endif /* CONFIG_PPC_MEM_KEYS */
+
if (unlikely(access_error(is_write, is_exec, vma)))
return bad_access(regs, address);
@@ -565,21 +610,6 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
-#ifdef CONFIG_PPC_MEM_KEYS
- /*
- * we skipped checking for access error due to key earlier.
- * Check that using handle_mm_fault error return.
- */
- if (unlikely(fault & VM_FAULT_SIGSEGV) &&
- !arch_vma_access_permitted(vma, is_write, is_exec, 0)) {
-
- int pkey = vma_pkey(vma);
-
- up_read(&mm->mmap_sem);
- return bad_key_fault_exception(regs, address, pkey);
- }
-#endif /* CONFIG_PPC_MEM_KEYS */
-
major |= fault & VM_FAULT_MAJOR;
if (fault_signal_pending(fault, regs))