author	Christophe Leroy <christophe.leroy@csgroup.eu>	2021-03-19 07:34:43 +0100
committer	Michael Ellerman <mpe@ellerman.id.au>	2021-03-29 04:22:17 +0200
commit	48cf12d88969bd4238b8769767eb476970319d93 (patch)
tree	1a2ffdb8efbf23ec2e1dd1e836b7362fd5792d21 /arch/powerpc/kernel/irq.c
parent	powerpc/setup_64: Fix sparse warnings (diff)
powerpc/irq: Inline call_do_irq() and call_do_softirq()
call_do_irq() and call_do_softirq() are simple enough to be worth inlining. Inlining them avoids an mflr/mtlr pair plus a save/reload on the stack.

This is inspired by the s390 arch. Several other arches do more or less the same; the way the sparc arch does it seems odd, though.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210320122227.345427-1-mpe@ellerman.id.au
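For context, these helpers were previously out-of-line assembly trampolines (the patch also removes them from misc_32.S/misc_64.S; that part of the change is outside the diffstat shown below, which is limited to irq.c). A rough reconstruction of the pre-patch 64-bit wrapper, to show the mflr/mtlr pair and the stack save/reload that inlining eliminates; treat the exact offsets as illustrative:

_GLOBAL(call_do_softirq)
	mflr	r0					# grab LR: the mflr the message refers to
	std	r0,16(r1)				# ...and spill it into the caller's frame
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)	# store the back-chain onto the softirq stack
	mr	r1,r3					# switch r1 to the softirq stack
	bl	__do_softirq
	ld	r1,0(r1)				# follow the back-chain to restore r1
	ld	r0,16(r1)				# reload the saved LR
	mtlr	r0					# ...the matching mtlr
	blr

The inline version below keeps the same stack switch (PPC_STLU / mr / bl / PPC_LL) but simply lists "lr" as a clobber; since the callers are non-leaf functions that save LR in their prologues anyway, the dedicated mflr/mtlr pair and the extra stack spill disappear.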
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c | 41
1 file changed, 41 insertions, 0 deletions
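For reference, here is a sketch of how irq.c uses the new inline helpers. This paraphrases the call sites of that era (do_softirq_own_stack() and do_IRQ()); it is not part of this diff, and the control flow is slightly simplified:

void do_softirq_own_stack(void)
{
	/* Run softirqs on the dedicated per-CPU softirq stack. */
	call_do_softirq(softirq_ctx[smp_processor_id()]);
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;

	/* Round r1 down to the base of the current stack. */
	cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		/* Already on an irq/softirq stack: call the handler directly. */
		__do_irq(regs);
	} else {
		/* Otherwise switch to the hardirq stack for the duration. */
		call_do_irq(regs, irqsp);
	}

	set_irq_regs(old_regs);
}

Note how call_do_irq() receives regs through a register variable pinned to r3 with a "+r" constraint, so the compiler materialises the first argument exactly where the powerpc calling convention expects it before the bl.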
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5b72abbff96c..260effc0a435 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -667,6 +667,47 @@ static inline void check_stack_overflow(void)
}
}
+static __always_inline void call_do_softirq(const void *sp)
+{
+ /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_softirq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
+}
+
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+ register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+ /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ "+r" (r3)
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_irq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
+}
+
void __do_irq(struct pt_regs *regs)
{
unsigned int irq;