author		Nicholas Piggin <npiggin@gmail.com>	2019-08-02 12:56:40 +0200
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-08-30 02:32:36 +0200
commit		fce16d482276f059b08368c833a1188ac1f25e86 (patch)
tree		4961bc606ce91a190eb5f7d2e52fa2309424dd06 /arch
parent		powerpc/64s/exception: machine check move unrecoverable handling out of line (diff)
download	linux-fce16d482276f059b08368c833a1188ac1f25e86.tar.xz
		linux-fce16d482276f059b08368c833a1188ac1f25e86.zip
powerpc/64s/exception: untangle early machine check handler branch
machine_check_early_common now branches to machine_check_handle_early,
which has no other callers.

Move the interleaving code out of the way, and remove the branch.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190802105709.27696-16-npiggin@gmail.com
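To make the control-flow change concrete, here is a rough C-flavoured model of the layout before and after the patch (hypothetical names, not the kernel code): before, the early entry point ends in a branch over the interleaved machine_check_common and idle-wakeup blocks to reach machine_check_handle_early; after, those blocks are moved below it and the handling code follows inline, so the label and the branch disappear.

#include <stdio.h>

/* Hypothetical model of the layout change; not the kernel code. */

static void handle_early(void)		/* models machine_check_handle_early */
{
	puts("run recovery checks, wind up, return from interrupt");
}

/*
 * Before: the only way to reach handle_early() is the tail call below,
 * which in the assembly is "b machine_check_handle_early" jumping over
 * the interleaved blocks.
 */
static void early_common_before(void)
{
	puts("save registers, call machine_check_early()");
	handle_early();
}

/*
 * After: the interleaved blocks live elsewhere, the same work is written
 * inline, and control simply falls through; no label, no branch.
 */
static void early_common_after(void)
{
	puts("save registers, call machine_check_early()");
	puts("run recovery checks, wind up, return from interrupt");
}

int main(void)
{
	early_common_before();
	early_common_after();
	return 0;
}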
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S | 129
1 file changed, 62 insertions, 67 deletions
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index af18d0f1d4ab..3a7f18021365 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -956,6 +956,16 @@ TRAMP_REAL_BEGIN(machine_check_fwnmi)
 
 TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
 
+#define MACHINE_CHECK_HANDLER_WINDUP			\
+	/* Clear MSR_RI before setting SRR0 and SRR1. */\
+	li	r9,0;					\
+	mtmsrd	r9,1;		/* Clear MSR_RI */	\
+	/* Decrement paca->in_mce now RI is clear. */	\
+	lhz	r12,PACA_IN_MCE(r13);			\
+	subi	r12,r12,1;				\
+	sth	r12,PACA_IN_MCE(r13);			\
+	EXCEPTION_RESTORE_REGS EXC_STD
+
 EXC_COMMON_BEGIN(machine_check_early_common)
 	mtctr	r10			/* Restore ctr */
 	mfspr	r11,SPRN_SRR0
@@ -1011,74 +1021,7 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	li	r10,MSR_RI
 	mtmsrd	r10,1
-	b	machine_check_handle_early
-EXC_COMMON_BEGIN(machine_check_common)
-	/*
-	 * Machine check is different because we use a different
-	 * save area: PACA_EXMC instead of PACA_EXGEN.
-	 */
-	EXCEPTION_COMMON(PACA_EXMC, 0x200)
-	FINISH_NAP
-	RECONCILE_IRQ_STATE(r10, r11)
-	ld	r3,PACA_EXMC+EX_DAR(r13)
-	lwz	r4,PACA_EXMC+EX_DSISR(r13)
-	/* Enable MSR_RI when finished with PACA_EXMC */
-	li	r10,MSR_RI
-	mtmsrd	r10,1
-	std	r3,_DAR(r1)
-	std	r4,_DSISR(r1)
-	bl	save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	machine_check_exception
-	b	ret_from_except
-
-#define MACHINE_CHECK_HANDLER_WINDUP			\
-	/* Clear MSR_RI before setting SRR0 and SRR1. */\
-	li	r9,0;					\
-	mtmsrd	r9,1;		/* Clear MSR_RI */	\
-	/* Decrement paca->in_mce now RI is clear. */	\
-	lhz	r12,PACA_IN_MCE(r13);			\
-	subi	r12,r12,1;				\
-	sth	r12,PACA_IN_MCE(r13);			\
-	EXCEPTION_RESTORE_REGS EXC_STD
-
-#ifdef CONFIG_PPC_P7_NAP
-/*
- * This is an idle wakeup. Low level machine check has already been
- * done. Queue the event then call the idle code to do the wake up.
- */
-EXC_COMMON_BEGIN(machine_check_idle_common)
-	bl	machine_check_queue_event
-
-	/*
-	 * We have not used any non-volatile GPRs here, and as a rule
-	 * most exception code including machine check does not.
-	 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
-	 * wakeup will restore volatile registers.
-	 *
-	 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
-	 *
-	 * Then decrement MCE nesting after finishing with the stack.
-	 */
-	ld	r3,_MSR(r1)
-	ld	r4,_LINK(r1)
-
-	lhz	r11,PACA_IN_MCE(r13)
-	subi	r11,r11,1
-	sth	r11,PACA_IN_MCE(r13)
-
-	mtlr	r4
-	rlwinm	r10,r3,47-31,30,31
-	cmpwi	cr1,r10,2
-	bltlr	cr1	/* no state loss, return to idle caller */
-	b	idle_return_gpr_loss
-#endif
-/*
- * Handle machine check early in real mode. We come here with
- * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
- */
-EXC_COMMON_BEGIN(machine_check_handle_early)
 	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	machine_check_early
@@ -1157,6 +1100,58 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
 	EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
 
+EXC_COMMON_BEGIN(machine_check_common)
+	/*
+	 * Machine check is different because we use a different
+	 * save area: PACA_EXMC instead of PACA_EXGEN.
+	 */
+	EXCEPTION_COMMON(PACA_EXMC, 0x200)
+	FINISH_NAP
+	RECONCILE_IRQ_STATE(r10, r11)
+	ld	r3,PACA_EXMC+EX_DAR(r13)
+	lwz	r4,PACA_EXMC+EX_DSISR(r13)
+	/* Enable MSR_RI when finished with PACA_EXMC */
+	li	r10,MSR_RI
+	mtmsrd	r10,1
+	std	r3,_DAR(r1)
+	std	r4,_DSISR(r1)
+	bl	save_nvgprs
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	machine_check_exception
+	b	ret_from_except
+
+#ifdef CONFIG_PPC_P7_NAP
+/*
+ * This is an idle wakeup. Low level machine check has already been
+ * done. Queue the event then call the idle code to do the wake up.
+ */
+EXC_COMMON_BEGIN(machine_check_idle_common)
+	bl	machine_check_queue_event
+
+	/*
+	 * We have not used any non-volatile GPRs here, and as a rule
+	 * most exception code including machine check does not.
+	 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
+	 * wakeup will restore volatile registers.
+	 *
+	 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
+	 *
+	 * Then decrement MCE nesting after finishing with the stack.
+	 */
+	ld	r3,_MSR(r1)
+	ld	r4,_LINK(r1)
+
+	lhz	r11,PACA_IN_MCE(r13)
+	subi	r11,r11,1
+	sth	r11,PACA_IN_MCE(r13)
+
+	mtlr	r4
+	rlwinm	r10,r3,47-31,30,31
+	cmpwi	cr1,r10,2
+	bltlr	cr1	/* no state loss, return to idle caller */
+	b	idle_return_gpr_loss
+#endif
+
 EXC_COMMON_BEGIN(unrecoverable_mce)
 	/*
 	 * We are going down. But there are chances that we might get hit by
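A note on the machine_check_idle_common block moved above: the rlwinm r10,r3,47-31,30,31 / cmpwi cr1,r10,2 pair decodes the wake-state field from the saved SRR1 and, per the inline comment, treats values below 2 as "no state loss". A minimal C sketch of that decode, assuming the SRR1[46:47] wake-state field (IBM bit numbering); the helper names are illustrative only, not kernel APIs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Models "rlwinm r10,r3,47-31,30,31": rotate the low word of SRR1 left
 * by 16 and keep the low two bits, i.e. pull SRR1[46:47] (IBM numbering,
 * bits 17:16 counting from the LSB) down to bits 1:0.
 */
static unsigned int srr1_wake_state(uint64_t srr1)
{
	return (srr1 >> 16) & 0x3;
}

/* Models "cmpwi cr1,r10,2; bltlr cr1": below 2 means no state was lost. */
static bool idle_state_lost(uint64_t srr1)
{
	return srr1_wake_state(srr1) >= 2;
}

int main(void)
{
	uint64_t srr1 = 0x2ULL << 16;	/* example: wake state 0b10 */

	printf("wake state %u, state lost: %s\n",
	       srr1_wake_state(srr1), idle_state_lost(srr1) ? "yes" : "no");
	return 0;
}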