path: root/arch/powerpc
author    Christophe Leroy <christophe.leroy@csgroup.eu>    2022-05-16 17:36:04 +0200
committer Michael Ellerman <mpe@ellerman.id.au>             2022-05-22 07:58:28 +0200
commit    5fe855169f9782c669f640b66242662209ffb72a (patch)
tree      056206f2eb73cef2a8f1a8a66e64fc979e14ca0d /arch/powerpc
parent    selftests/powerpc: Better reporting in spectre_v2 (diff)
download  linux-5fe855169f9782c669f640b66242662209ffb72a.tar.xz
          linux-5fe855169f9782c669f640b66242662209ffb72a.zip
powerpc/irq: Remove arch_local_irq_restore() for !CONFIG_CC_HAS_ASM_GOTO
All supported versions of GCC & clang support asm goto. Remove the
!CONFIG_CC_HAS_ASM_GOTO version of arch_local_irq_restore().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/58df50c9e77e2ed945bacdead30412770578886b.1652715336.git.christophe.leroy@csgroup.eu
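For context, asm goto (the feature CONFIG_CC_HAS_ASM_GOTO advertises) lets an inline-assembly template branch directly to a C label, which is the mechanism the remaining arch_local_irq_restore() path relies on. A minimal standalone sketch of the construct, using a hypothetical helper name and a PowerPC branch mnemonic purely for illustration, not code taken from this patch:

/*
 * Illustrative sketch only: the asm template may jump straight to the
 * C label "yes", so no flag variable or extra C-level branch is needed.
 * Builds for a powerpc target with any asm-goto-capable GCC or clang.
 */
#include <stdbool.h>

static inline bool always_taken(void)          /* hypothetical helper */
{
	asm goto("b %l[yes]"                   /* unconditional branch  */
		 : /* no outputs  */
		 : /* no inputs   */
		 : /* no clobbers */
		 : yes);                       /* C labels reachable from the asm */
	return false;
yes:
	return true;
}

With the fallback removed, every supported compiler builds the asm-goto-based arch_local_irq_restore(), which is why the diffstat below shows only deletions.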
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/irq.c  77
1 file changed, 0 insertions(+), 77 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e055e13685a..ea38c13936c7 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -217,7 +217,6 @@ static inline void replay_soft_interrupts_irqrestore(void)
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif

-#ifdef CONFIG_CC_HAS_ASM_GOTO
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
@@ -313,82 +312,6 @@ happened:
__hard_irq_enable();
preempt_enable();
}
-#else
-notrace void arch_local_irq_restore(unsigned long mask)
-{
- unsigned char irq_happened;
-
- /* Write the new soft-enabled value */
- irq_soft_mask_set(mask);
- if (mask)
- return;
-
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(in_nmi() || in_hardirq());
-
- /*
- * From this point onward, we can take interrupts, preempt,
- * etc... unless we got hard-disabled. We check if an event
- * happened. If none happened, we know we can just return.
- *
- * We may have preempted before the check below, in which case
- * we are checking the "new" CPU instead of the old one. This
- * is only a problem if an event happened on the "old" CPU.
- *
- * External interrupt events will have caused interrupts to
- * be hard-disabled, so there is no problem, we
- * cannot have preempted.
- */
- irq_happened = get_irq_happened();
- if (!irq_happened) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
- return;
- }
-
- /* We need to hard disable to replay. */
- if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- } else {
- /*
- * We should already be hard disabled here. We had bugs
- * where that wasn't the case so let's dbl check it and
- * warn if we are wrong. Only do that when IRQ tracing
- * is enabled as mfmsr() can be costly.
- */
- if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
- if (WARN_ON_ONCE(mfmsr() & MSR_EE))
- __hard_irq_disable();
- }
-
- if (irq_happened == PACA_IRQ_HARD_DIS) {
- local_paca->irq_happened = 0;
- __hard_irq_enable();
- return;
- }
- }
-
- /*
- * Disable preempt here, so that the below preempt_enable will
- * perform resched if required (a replayed interrupt may set
- * need_resched).
- */
- preempt_disable();
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- trace_hardirqs_off();
-
- replay_soft_interrupts_irqrestore();
- local_paca->irq_happened = 0;
-
- trace_hardirqs_on();
- irq_soft_mask_set(IRQS_ENABLED);
- __hard_irq_enable();
- preempt_enable();
-}
-#endif
EXPORT_SYMBOL(arch_local_irq_restore);

/*