author    | Nicholas Piggin <npiggin@gmail.com>   | 2021-03-16 11:41:57 +0100
committer | Michael Ellerman <mpe@ellerman.id.au> | 2021-04-14 15:04:20 +0200
commit    | dc6231821a148d0392292924fdae5b34679af6b2 (patch)
tree      | 6622b529c78d009917467ec174946edf60867556 /arch
parent    | powerpc/64e/interrupt: always save nvgprs on interrupt (diff)
powerpc/interrupt: update common interrupt code for
This adjusts the 64-bit asm and the common C interrupt return code so
that they can also be used by the 64e subarchitecture; a rough sketch
of the resulting code pattern follows below.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210316104206.407354-4-npiggin@gmail.com
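The core of the C-side change is to keep one shared copy of the exit-work code and guard only the statements that differ per subarchitecture, instead of compiling whole functions out for 64e. Below is a minimal, self-contained sketch of that pattern only; the CONFIG_ define and the stub functions are stand-ins for illustration, not the kernel's Kconfig machinery or real scheduler entry points:

```c
#include <stdio.h>

/* Stand-in for a Kconfig symbol; in the kernel this comes from the build
 * configuration, not from a #define in the source file. */
#define CONFIG_PPC_BOOK3E_64 1

/* Stub scheduler entry points, for illustration only. */
void schedule(void)      { puts("schedule()"); }
void schedule_user(void) { puts("schedule_user()"); }

/*
 * One shared copy of the "need resched" handling, with only the
 * subarch-specific call site guarded -- the same shape as the
 * interrupt.c hunks in the patch below.
 */
static void handle_need_resched(void)
{
#ifdef CONFIG_PPC_BOOK3E_64
	/* 64e still relies on the context-tracking-aware helper here. */
	schedule_user();
#else
	schedule();
#endif
}

int main(void)
{
	handle_need_resched();
	return 0;
}
```

The asm side gets the same treatment in the patch: the CONFIG_PPC_BOOK3S-only AMR/KUAP handling is narrowed to #ifdef blocks inside the shared interrupt_return path rather than bracketing the whole section of entry_64.S.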
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/kernel/entry_64.S  |  9 |
-rw-r--r-- | arch/powerpc/kernel/interrupt.c | 35 |
2 files changed, 28 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 853534b2ae2e..555b3d0a3f38 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -632,7 +632,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	addi	r1,r1,SWITCH_FRAME_SIZE
 	blr
 
-#ifdef CONFIG_PPC_BOOK3S
 /*
  * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
  * touched, no exit work created, then this can be used.
@@ -644,6 +643,7 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
 	kuap_check_amr r3, r4
 	ld	r5,_MSR(r1)
 	andi.	r0,r5,MSR_PR
+#ifdef CONFIG_PPC_BOOK3S
 	bne	.Lfast_user_interrupt_return_amr
 	kuap_kernel_restore r3, r4
 	andi.	r0,r5,MSR_RI
@@ -652,6 +652,10 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	unrecoverable_exception
 	b	. /* should not get here */
+#else
+	bne	.Lfast_user_interrupt_return
+	b	.Lfast_kernel_interrupt_return
+#endif
 
 	.balign IFETCH_ALIGN_BYTES
 	.globl interrupt_return
@@ -665,8 +669,10 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
 	cmpdi	r3,0
 	bne-	.Lrestore_nvgprs
 
+#ifdef CONFIG_PPC_BOOK3S
 .Lfast_user_interrupt_return_amr:
 	kuap_user_restore r3, r4
+#endif
 .Lfast_user_interrupt_return:
 	ld	r11,_NIP(r1)
 	ld	r12,_MSR(r1)
@@ -775,7 +781,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 	RFI_TO_KERNEL
 	b	. /* prevent speculative execution */
 
-#endif /* CONFIG_PPC_BOOK3S */
 
 #ifdef CONFIG_PPC_RTAS
 /*
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index fbabb49888d3..381a618b5b5b 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -235,6 +235,10 @@ static notrace void booke_load_dbcr0(void)
 #endif
 }
 
+/* temporary hack for context tracking, removed in later patch */
+#include <linux/sched/debug.h>
+asmlinkage __visible void __sched schedule_user(void);
+
 /*
  * This should be called after a syscall returns, with r3 the return value
  * from the syscall. If this function returns non-zero, the system call
@@ -292,7 +296,11 @@ again:
 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable();
 		if (ti_flags & _TIF_NEED_RESCHED) {
+#ifdef CONFIG_PPC_BOOK3E_64
+			schedule_user();
+#else
 			schedule();
+#endif
 		} else {
 			/*
 			 * SIGPENDING must restore signal handler function
@@ -349,18 +357,13 @@ again:
 
 	account_cpu_user_exit();
 
-#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not using this */
-	/*
-	 * We do this at the end so that we do context switch with KERNEL AMR
-	 */
+	/* Restore user access locks last */
 	kuap_user_restore(regs);
-#endif
 	kuep_unlock();
 
 	return ret;
 }
 
-#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */
 notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
 {
 	unsigned long ti_flags;
@@ -372,7 +375,9 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	BUG_ON(!(regs->msr & MSR_PR));
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(arch_irq_disabled_regs(regs));
+#ifdef CONFIG_PPC_BOOK3S_64
 	CT_WARN_ON(ct_state() == CONTEXT_USER);
+#endif
 
 	/*
 	 * We don't need to restore AMR on the way back to userspace for KUAP.
@@ -387,7 +392,11 @@ again:
 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable(); /* returning to user: may enable */
 		if (ti_flags & _TIF_NEED_RESCHED) {
+#ifdef CONFIG_PPC_BOOK3E_64
+			schedule_user();
+#else
 			schedule();
+#endif
 		} else {
 			if (ti_flags & _TIF_SIGPENDING)
 				ret |= _TIF_RESTOREALL;
@@ -432,10 +441,9 @@ again:
 
 	account_cpu_user_exit();
 
-	/*
-	 * We do this at the end so that we do context switch with KERNEL AMR
-	 */
+	/* Restore user access locks last */
 	kuap_user_restore(regs);
+
 	return ret;
 }
 
@@ -456,7 +464,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	 * CT_WARN_ON comes here via program_check_exception,
 	 * so avoid recursion.
 	 */
-	if (TRAP(regs) != 0x700)
+	if (IS_ENABLED(CONFIG_BOOKS) && TRAP(regs) != 0x700)
 		CT_WARN_ON(ct_state() == CONTEXT_USER);
 
 	kuap = kuap_get_and_assert_locked();
@@ -497,12 +505,11 @@ again:
 #endif
 
 	/*
-	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
-	 * which would cause Read-After-Write stalls. Hence, we take the AMR
-	 * value from the check above.
+	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
+	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
+	 * AMR value from the check above.
 	 */
 	kuap_kernel_restore(regs, kuap);
 
 	return ret;
 }
-#endif
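For context on the temporary prototype added at the top of interrupt.c above: schedule_user() is the generic scheduler helper that brackets schedule() with context-tracking enter/exit, which 64e still depends on at this point in the series. The following is a rough, stubbed sketch of that idea; the enum and helper bodies are illustrative stand-ins so the example builds on its own, not the kernel's implementation in kernel/sched/core.c:

```c
#include <stdio.h>

/* Stand-ins for kernel facilities so this sketch compiles on its own. */
enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };

static enum ctx_state exception_enter(void)
{
	/* In the kernel this tells context tracking we left user mode. */
	puts("exception_enter(): user -> kernel");
	return CONTEXT_USER;
}

static void exception_exit(enum ctx_state prev)
{
	/* ...and this restores the previous context-tracking state. */
	printf("exception_exit(): back to state %d\n", prev);
}

static void schedule(void)
{
	puts("schedule()");
}

/*
 * Rough shape of schedule_user(): run the scheduler with context
 * tracking told that we are temporarily back in the kernel.
 */
static void schedule_user(void)
{
	enum ctx_state prev_state = exception_enter();

	schedule();
	exception_exit(prev_state);
}

int main(void)
{
	schedule_user();
	return 0;
}
```

As the added comment in the hunk notes, the prototype is a temporary hack that a later patch in the series removes.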