author    Linus Torvalds <torvalds@linux-foundation.org>  2021-07-02 21:54:34 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-07-02 21:54:34 +0200
commit    019b3fd94ba73d3ac615f0537440b81f129821f6 (patch)
tree      09657e8fd2ff04295381e9c6492553fe16c58ab4 /arch/powerpc/kernel/exceptions-64s.S
parent    Merge tag 'asm-generic-unaligned-5.14' of git://git.kernel.org/pub/scm/linux/... (diff)
parent    powerpc: Only build restart_table.c for 64s (diff)
Merge tag 'powerpc-5.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:

 - A big series refactoring parts of our KVM code, and converting some to C.

 - Support for ARCH_HAS_SET_MEMORY, and ARCH_HAS_STRICT_MODULE_RWX on some CPUs.

 - Support for the Microwatt soft-core.

 - Optimisations to our interrupt return path on 64-bit.

 - Support for userspace access to the NX GZIP accelerator on PowerVM on Power10.

 - Enable KUAP and KUEP by default on 32-bit Book3S CPUs.

 - Other smaller features, fixes & cleanups.

Thanks to: Andy Shevchenko, Aneesh Kumar K.V, Arnd Bergmann, Athira Rajeev, Baokun Li,
Benjamin Herrenschmidt, Bharata B Rao, Christophe Leroy, Daniel Axtens, Daniel Henrique
Barboza, Finn Thain, Geoff Levand, Haren Myneni, Jason Wang, Jiapeng Chong, Joel Stanley,
Jordan Niethe, Kajol Jain, Nathan Chancellor, Nathan Lynch, Naveen N. Rao, Nicholas Piggin,
Nick Desaulniers, Paul Mackerras, Russell Currey, Sathvika Vasireddy, Shaokun Zhang,
Stephen Rothwell, Sudeep Holla, Suraj Jitindar Singh, Tom Rix, Vaibhav Jain, YueHaibing,
Zhang Jianhua, and Zhen Lei.

* tag 'powerpc-5.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (218 commits)
  powerpc: Only build restart_table.c for 64s
  powerpc/64s: move ret_from_fork etc above __end_soft_masked
  powerpc/64s/interrupt: clean up interrupt return labels
  powerpc/64/interrupt: add missing kprobe annotations on interrupt exit symbols
  powerpc/64: enable MSR[EE] in irq replay pt_regs
  powerpc/64s/interrupt: preserve regs->softe for NMI interrupts
  powerpc/64s: add a table of implicit soft-masked addresses
  powerpc/64e: remove implicit soft-masking and interrupt exit restart logic
  powerpc/64e: fix CONFIG_RELOCATABLE build warnings
  powerpc/64s: fix hash page fault interrupt handler
  powerpc/4xx: Fix setup_kuep() on SMP
  powerpc/32s: Fix setup_{kuap/kuep}() on SMP
  powerpc/interrupt: Use names in check_return_regs_valid()
  powerpc/interrupt: Also use exit_must_hard_disable() on PPC32
  powerpc/sysfs: Replace sizeof(arr)/sizeof(arr[0]) with ARRAY_SIZE
  powerpc/ptrace: Refactor regs_set_return_{msr/ip}
  powerpc/ptrace: Move set_return_regs_changed() before regs_set_return_{msr/ip}
  powerpc/stacktrace: Fix spurious "stale" traces in raise_backtrace_ipi()
  powerpc/pseries/vas: Include irqdomain.h
  powerpc: mark local variables around longjmp as volatile
  ...
Diffstat (limited to 'arch/powerpc/kernel/exceptions-64s.S')
 -rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 251
 1 file changed, 187 insertions(+), 64 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f7fc6e078d4e..4aec59a77d4c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -428,18 +428,31 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
/* If coming from user, skip soft-mask tests. */
andi. r10,r12,MSR_PR
- bne 2f
-
- /* Kernel code running below __end_interrupts is implicitly
- * soft-masked */
- LOAD_HANDLER(r10, __end_interrupts)
+ bne 3f
+
+ /*
+ * Kernel code running below __end_soft_masked may be
+ * implicitly soft-masked if it is within the regions
+ * in the soft mask table.
+ */
+ LOAD_HANDLER(r10, __end_soft_masked)
cmpld r11,r10
+ bge+ 1f
+
+ /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
+ mtctr r12
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ SEARCH_SOFT_MASK_TABLE
+ cmpdi r12,0
+ mfctr r12 /* Restore r12 to SRR1 */
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ beq 1f /* Not in soft-mask table */
li r10,IMASK
- blt- 1f
+ b 2f /* In soft-mask table, always mask */
/* Test the soft mask state against our interrupt's bit */
- lbz r10,PACAIRQSOFTMASK(r13)
-1: andi. r10,r10,IMASK
+1: lbz r10,PACAIRQSOFTMASK(r13)
+2: andi. r10,r10,IMASK
/* Associate vector numbers with bits in paca->irq_happened */
.if IVEC == 0x500 || IVEC == 0xea0
li r10,PACA_IRQ_EE
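[Editor's note] The hunk above changes the soft-mask gate: an interrupt taken in the kernel is now also treated as masked when the interrupted NIP falls inside a region listed in the new soft-mask table, not only when it is below a fixed __end_interrupts boundary. A minimal C model of that decision follows; the struct, symbol, and function names are illustrative stand-ins for the assembly and PACA state, not kernel identifiers.

#include <stdbool.h>
#include <stdint.h>

struct soft_mask_entry { uint64_t start, end; };   /* one __soft_mask_table record */

/* Example stand-ins for the linker-collected table and __end_soft_masked
 * (addresses are made up for the sketch). */
static const struct soft_mask_entry soft_mask_table[] = {
    { 0xc000000000003000ull, 0xc000000000004000ull },
};
static const uint64_t end_soft_masked = 0xc000000000010000ull;

static bool nip_in_soft_mask_table(uint64_t nip)
{
    for (unsigned long i = 0;
         i < sizeof(soft_mask_table) / sizeof(soft_mask_table[0]); i++)
        if (nip >= soft_mask_table[i].start && nip < soft_mask_table[i].end)
            return true;                 /* SEARCH_SOFT_MASK_TABLE hit */
    return false;
}

/* Decide whether the interrupt must be deferred.  'imask' is this interrupt
 * type's IMASK bit; 'irq_soft_mask' models PACAIRQSOFTMASK(r13). */
static bool interrupt_is_soft_masked(uint64_t nip, bool from_user,
                                     uint8_t irq_soft_mask, uint8_t imask)
{
    uint8_t mask;

    if (from_user)                    /* andi. r10,r12,MSR_PR ; bne 3f */
        return false;
    if (nip < end_soft_masked && nip_in_soft_mask_table(nip))
        mask = imask;                 /* li r10,IMASK: always mask here */
    else
        mask = irq_soft_mask;         /* 1: lbz r10,PACAIRQSOFTMASK(r13) */
    return (mask & imask) != 0;       /* 2: andi. r10,r10,IMASK */
}

The "li r10,IMASK" path corresponds to mask = imask above: inside an implicit region the interrupt is deferred unconditionally, regardless of the soft-mask byte.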
@@ -470,7 +483,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
.if ISTACK
andi. r10,r12,MSR_PR /* See if coming from user */
-2: mr r10,r1 /* Save r1 */
+3: mr r10,r1 /* Save r1 */
subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */
beq- 100f
ld r1,PACAKSAVE(r13) /* kernel stack to use */
@@ -485,6 +498,20 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
+ /* Mark our [H]SRRs valid for return */
+ li r10,1
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ stb r10,PACAHSRR_VALID(r13)
+ FTR_SECTION_ELSE
+ stb r10,PACASRR_VALID(r13)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ stb r10,PACAHSRR_VALID(r13)
+ .else
+ stb r10,PACASRR_VALID(r13)
+ .endif
+
.if ISET_RI
li r10,MSR_RI
mtmsrd r10,1 /* Set MSR_RI */
@@ -577,6 +604,66 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
__GEN_COMMON_BODY \name
.endm
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___restart_table)
+ LOAD_REG_ADDR(r10, __stop___restart_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ ld r12,16(r9)
+ b 303f
+301:
+ addi r9,r9,24
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
+.macro SEARCH_SOFT_MASK_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___soft_mask_table)
+ LOAD_REG_ADDR(r10, __stop___soft_mask_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ li r12,1
+ b 303f
+301:
+ addi r9,r9,16
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
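[Editor's note] Both new macros are a linear search over a linker-collected table of address ranges: SEARCH_RESTART_TABLE walks 24-byte entries (start, end, fixup) and leaves the fixup address in r12, while SEARCH_SOFT_MASK_TABLE walks 16-byte entries (start, end) and leaves 1 in r12 on a hit; both leave 0 on a miss, and the CONFIG_RELOCATABLE variants temporarily swap in the kernel TOC to load the table bounds. A sketch of the same logic in C, with illustrative names (the entry strides come from the addi r9,r9,24 / addi r9,r9,16 steps above):

#include <stdint.h>

struct restart_entry { uint64_t start, end, fixup; };  /* ld 0/8/16(r9) */
struct soft_mask_entry { uint64_t start, end; };       /* ld 0/8(r9)    */

/* SEARCH_RESTART_TABLE: r12 = fixup address if r11 (NIP) hits a range, else 0 */
static uint64_t search_restart_table(const struct restart_entry *tbl,
                                     const struct restart_entry *end,
                                     uint64_t nip)
{
    for (; tbl != end; tbl++)                 /* 300: cmpd r9,r10 */
        if (nip >= tbl->start && nip < tbl->end)
            return tbl->fixup;                /* ld r12,16(r9) */
    return 0;                                 /* 302: li r12,0 */
}

/* SEARCH_SOFT_MASK_TABLE: r12 = 1 if r11 (NIP) hits a range, else 0 */
static uint64_t search_soft_mask_table(const struct soft_mask_entry *tbl,
                                       const struct soft_mask_entry *end,
                                       uint64_t nip)
{
    for (; tbl != end; tbl++)
        if (nip >= tbl->start && nip < tbl->end)
            return 1;                         /* li r12,1 */
    return 0;
}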
/*
* Restore all registers including H/SRR0/1 saved in a stack frame of a
* standard exception.
@@ -584,10 +671,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.macro EXCEPTION_RESTORE_REGS hsrr=0
/* Move original SRR0 and SRR1 into the respective regs */
ld r9,_MSR(r1)
+ li r10,0
.if \hsrr
mtspr SPRN_HSRR1,r9
+ stb r10,PACAHSRR_VALID(r13)
.else
mtspr SPRN_SRR1,r9
+ stb r10,PACASRR_VALID(r13)
.endif
ld r9,_NIP(r1)
.if \hsrr
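[Editor's note] This hunk pairs with the PACASRR_VALID / PACAHSRR_VALID stores added to the common entry code earlier in the diff: the flag is set once the (H)SRR pair holds the state needed to return, and cleared whenever those registers are rewritten, so the interrupt-return path can avoid redundant SPR writes. A rough C model of that bookkeeping, with illustrative names only:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative model of the PACA flag and the (H)SRR register pair. */
struct srr_state {
    bool     valid;        /* PACASRR_VALID or PACAHSRR_VALID */
    uint64_t srr0, srr1;   /* what the SPR pair currently holds */
};

/* Interrupt entry (__GEN_COMMON_BODY): the SPRs already hold the return
 * state, so record that fact. */
static void interrupt_entry_mark_valid(struct srr_state *s)
{
    s->valid = true;                  /* li r10,1 ; stb r10,PACA[H]SRR_VALID */
}

/* EXCEPTION_RESTORE_REGS: the SPRs are rewritten from the stack frame, so
 * the "still valid since entry" assumption no longer holds. */
static void exception_restore_regs(struct srr_state *s,
                                   uint64_t frame_msr, uint64_t frame_nip)
{
    s->valid = false;                 /* li r10,0 ; stb r10,PACA[H]SRR_VALID */
    s->srr1 = frame_msr;              /* mtspr SPRN_[H]SRR1,r9 */
    s->srr0 = frame_nip;              /* mtspr SPRN_[H]SRR0,r9 */
}

/* Interrupt return (conceptually, in the C exit code): reload the SPRs only
 * if something invalidated them since entry. */
static void interrupt_return_prepare(struct srr_state *s,
                                     uint64_t ret_msr, uint64_t ret_nip)
{
    if (!s->valid) {
        s->srr1 = ret_msr;
        s->srr0 = ret_nip;
        s->valid = true;
    }
}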
@@ -704,17 +794,17 @@ __start_interrupts:
* scv instructions enter the kernel without changing EE, RI, ME, or HV.
* In particular, this means we can take a maskable interrupt at any point
* in the scv handler, which is unlike any other interrupt. This is solved
- * by treating the instruction addresses below __end_interrupts as being
- * soft-masked.
+ * by treating the instruction addresses in the handler as being soft-masked,
+ * by adding a SOFT_MASK_TABLE entry for them.
*
* AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
* ensure scv is never executed with relocation off, which means AIL-0
* should never happen.
*
- * Before leaving the below __end_interrupts text, at least of the following
- * must be true:
+ * Before leaving the following inside-__end_soft_masked text, at least of the
+ * following must be true:
* - MSR[PR]=1 (i.e., return to userspace)
- * - MSR_EE|MSR_RI is set (no reentrant exceptions)
+ * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
* - Standard kernel environment is set up (stack, paca, etc)
*
* Call convention:
@@ -722,6 +812,7 @@ __start_interrupts:
* syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
+1:
/* SCV 0 */
mr r9,r13
GET_PACA(r13)
@@ -751,8 +842,11 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
b system_call_vectored_sigill
#endif
.endr
+2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
+SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+
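[Editor's note] SOFT_MASK_TABLE(1b, 2b) records the address range spanning the scv vectors as one entry in the soft-mask table searched above, so a maskable interrupt taken inside that range is treated as soft-masked even though scv enters the kernel with MSR[EE] unchanged. A self-contained C illustration of the effect (addresses and names are made up for the example):

#include <stdint.h>
#include <stdio.h>

struct soft_mask_entry { uint64_t start, end; };

/* Pretend addresses for the 1: and 2: labels bracketing the scv vectors. */
#define SCV_VECTORS_START 0xc000000000003000ull
#define SCV_VECTORS_END   0xc000000000004000ull

/* The one record SOFT_MASK_TABLE(1b, 2b) contributes to the table the linker
 * gathers between __start___soft_mask_table and __stop___soft_mask_table. */
static const struct soft_mask_entry soft_mask_table[] = {
    { SCV_VECTORS_START, SCV_VECTORS_END },
};

int main(void)
{
    /* An NIP inside the scv vectors is reported as implicitly soft-masked. */
    uint64_t nip = SCV_VECTORS_START + 0x40;
    for (unsigned i = 0; i < sizeof(soft_mask_table) / sizeof(soft_mask_table[0]); i++)
        if (nip >= soft_mask_table[i].start && nip < soft_mask_table[i].end)
            printf("NIP 0x%llx: implicitly soft-masked\n",
                   (unsigned long long)nip);
    return 0;
}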
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
__LOAD_HANDLER(r10, system_call_vectored_common)
@@ -1149,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
- b interrupt_return
+ b interrupt_return_srr
#ifdef CONFIG_PPC_P7_NAP
@@ -1275,7 +1369,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_srr
1: bl do_break
/*
@@ -1283,7 +1377,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* If so, we need to restore them with their updated values.
*/
REST_NVGPRS(r1)
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1323,7 +1417,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1332,7 +1426,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1368,7 +1462,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1403,7 +1497,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1412,7 +1506,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1456,7 +1550,11 @@ EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
+ BEGIN_FTR_SECTION
+ b interrupt_return_hsrr
+ FTR_SECTION_ELSE
+ b interrupt_return_srr
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/**
@@ -1483,7 +1581,7 @@ EXC_COMMON_BEGIN(alignment_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl alignment_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1590,7 +1688,7 @@ EXC_COMMON_BEGIN(program_check_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_srr
/*
@@ -1633,12 +1731,12 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl load_up_fpu
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
@@ -1677,7 +1775,7 @@ EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
addi r3,r1,STACK_FRAME_OVERHEAD
bl timer_interrupt
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1714,6 +1812,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
*
* Be careful to avoid touching the kernel stack.
*/
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
@@ -1761,7 +1861,7 @@ EXC_COMMON_BEGIN(doorbell_super_common)
#else
bl unknown_async_exception
#endif
- b interrupt_return
+ b interrupt_return_srr
EXC_REAL_NONE(0xb00, 0x100)
@@ -1838,8 +1938,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mtctr r10
bctr
.else
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
#ifdef CONFIG_RELOCATABLE
__LOAD_HANDLER(r10, system_call_common)
mtctr r10
@@ -1925,7 +2023,7 @@ EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
addi r3,r1,STACK_FRAME_OVERHEAD
bl single_step_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -1963,7 +2061,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -1988,7 +2086,7 @@ EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2012,7 +2110,7 @@ EXC_COMMON_BEGIN(emulation_assist_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl emulation_assist_interrupt
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2089,7 +2187,7 @@ EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2119,7 +2217,7 @@ EXC_COMMON_BEGIN(h_doorbell_common)
#else
bl unknown_async_exception
#endif
- b interrupt_return
+ b interrupt_return_hsrr
/**
@@ -2145,7 +2243,7 @@ EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xec0, 0x20)
@@ -2188,7 +2286,7 @@ EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2225,19 +2323,19 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl load_up_altivec
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2278,14 +2376,14 @@ BEGIN_FTR_SECTION
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2313,7 +2411,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
+ b interrupt_return_srr
/**
@@ -2341,7 +2439,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
- b interrupt_return
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xfa0, 0x20)
@@ -2370,7 +2468,7 @@ EXC_COMMON_BEGIN(cbe_system_error_common)
GEN_COMMON cbe_system_error
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_system_error_exception
- b interrupt_return
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
@@ -2401,7 +2499,7 @@ EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
addi r3,r1,STACK_FRAME_OVERHEAD
bl instruction_breakpoint_exception
- b interrupt_return
+ b interrupt_return_srr
EXC_REAL_NONE(0x1400, 0x100)
@@ -2509,6 +2607,8 @@ BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -2521,7 +2621,7 @@ EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
+ b interrupt_return_hsrr
#ifdef CONFIG_CBE_RAS
@@ -2538,7 +2638,7 @@ EXC_COMMON_BEGIN(cbe_maintenance_common)
GEN_COMMON cbe_maintenance
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_maintenance_exception
- b interrupt_return
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
@@ -2568,7 +2668,7 @@ EXC_COMMON_BEGIN(altivec_assist_common)
#else
bl unknown_exception
#endif
- b interrupt_return
+ b interrupt_return_srr
#ifdef CONFIG_CBE_RAS
@@ -2585,7 +2685,7 @@ EXC_COMMON_BEGIN(cbe_thermal_common)
GEN_COMMON cbe_thermal
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_thermal_exception
- b interrupt_return
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
@@ -2610,7 +2710,6 @@ INT_DEFINE_END(soft_nmi)
* and run it entirely with interrupts hard disabled.
*/
EXC_COMMON_BEGIN(soft_nmi_common)
- mfspr r11,SPRN_SRR0
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
@@ -2624,6 +2723,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
mtmsrd r9,1
kuap_kernel_restore r9, r10
+
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
@@ -2645,19 +2745,24 @@ masked_Hinterrupt:
.else
masked_interrupt:
.endif
- lbz r11,PACAIRQHAPPENED(r13)
- or r11,r11,r10
- stb r11,PACAIRQHAPPENED(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ lbz r9,PACAIRQHAPPENED(r13)
+ or r9,r9,r10
+ stb r9,PACAIRQHAPPENED(r13)
+
+ .if ! \hsrr
cmpwi r10,PACA_IRQ_DEC
bne 1f
- lis r10,0x7fff
- ori r10,r10,0xffff
- mtspr SPRN_DEC,r10
+ LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
+ mtspr SPRN_DEC,r9
#ifdef CONFIG_PPC_WATCHDOG
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
b soft_nmi_common
#else
b 2f
#endif
+ .endif
+
1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
beq 2f
xori r12,r12,MSR_EE /* clear MSR_EE */
@@ -2666,11 +2771,29 @@ masked_interrupt:
.else
mtspr SPRN_SRR1,r12
.endif
- ori r11,r11,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQHAPPENED(r13)
+ ori r9,r9,PACA_IRQ_HARD_DIS
+ stb r9,PACAIRQHAPPENED(r13)
2: /* done */
- ld r10,PACA_EXGEN+EX_CTR(r13)
- mtctr r10
+ li r9,0
+ .if \hsrr
+ stb r9,PACAHSRR_VALID(r13)
+ .else
+ stb r9,PACASRR_VALID(r13)
+ .endif
+
+ SEARCH_RESTART_TABLE
+ cmpdi r12,0
+ beq 3f
+ .if \hsrr
+ mtspr SPRN_HSRR0,r12
+ .else
+ mtspr SPRN_SRR0,r12
+ .endif
+3:
+
+ ld r9,PACA_EXGEN+EX_CTR(r13)
+ mtctr r9
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
mtcrf 0x80,r9
std r1,PACAR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
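[Editor's note] The rewritten masked_interrupt path, modelled in C below: the pending source is recorded in PACAIRQHAPPENED, hard-mask sources also clear MSR[EE] in the return MSR, the (H)SRR-valid flag is dropped because the SPRs may be rewritten, and if the interrupted NIP lies in a restart-table region the return address is redirected to that region's fixup. All identifiers and the PACA_IRQ_* values are illustrative; the decrementer/watchdog special case is omitted.

#include <stdbool.h>
#include <stdint.h>

#define MSR_EE                  0x8000ull
#define PACA_IRQ_HARD_DIS       0x01    /* illustrative value */
#define PACA_IRQ_MUST_HARD_MASK 0x10    /* illustrative value */

struct restart_entry { uint64_t start, end, fixup; };

struct masked_state {
    uint8_t  irq_happened;   /* PACAIRQHAPPENED */
    bool     srr_valid;      /* PACA[H]SRR_VALID */
    uint64_t srr0, srr1;     /* return NIP / MSR held in the (H)SRRs */
};

static uint64_t search_restart_table(const struct restart_entry *tbl,
                                     unsigned long n, uint64_t nip)
{
    for (unsigned long i = 0; i < n; i++)
        if (nip >= tbl[i].start && nip < tbl[i].end)
            return tbl[i].fixup;
    return 0;
}

static void masked_interrupt(struct masked_state *s,
                             const struct restart_entry *tbl, unsigned long n,
                             uint8_t irq_bit)
{
    uint64_t fixup;

    s->irq_happened |= irq_bit;                 /* or r9,r9,r10 ; stb r9,... */

    if (irq_bit & PACA_IRQ_MUST_HARD_MASK) {    /* 1: andi. r10,r10,... */
        s->srr1 &= ~MSR_EE;                     /* xori r12,r12,MSR_EE ; mtspr */
        s->irq_happened |= PACA_IRQ_HARD_DIS;
    }

    s->srr_valid = false;                       /* stb r9,PACA[H]SRR_VALID */

    fixup = search_restart_table(tbl, n, s->srr0);  /* SEARCH_RESTART_TABLE */
    if (fixup)
        s->srr0 = fixup;                        /* mtspr SPRN_[H]SRR0,r12 */
    /* ...the scratch registers are then restored and the rfid returns. */
}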
@@ -2881,7 +3004,7 @@ MASKED_INTERRUPT hsrr=1
USE_FIXED_SECTION(virt_trampolines)
/*
- * All code below __end_interrupts is treated as soft-masked. If
+ * All code below __end_soft_masked is treated as soft-masked. If
* any code runs here with MSR[EE]=1, it must then cope with pending
* soft interrupt being raised (i.e., by ensuring it is replayed).
*