author		Nicholas Piggin <npiggin@gmail.com>	2017-08-09 14:41:21 +0200
committer	Michael Ellerman <mpe@ellerman.id.au>	2017-08-09 15:45:26 +0200
commit		0459ddfdb31e7d812b555a2530ecbacdf96961a6
tree		872d7e7c3ea27de89c18a2710c672038281f2c8e /arch
parent		powerpc/configs: Re-enable HARD/SOFT lockup detectors
powerpc: NMI IPI improve lock primitive
When the NMI IPI lock is contended, spin at low SMT priority, using
loads only, and with interrupts enabled (where possible). This
improves behaviour under high contention (e.g., a system crash when
a number of CPUs are trying to enter the debugger).
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
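For context, spin_until_cond() comes from the generic spin-loop primitives in include/linux/processor.h, which this patch uses but does not modify. Roughly (paraphrased sketch below, not part of this diff), it polls the condition with plain loads, bracketed by the arch hooks spin_begin()/spin_end():

	/*
	 * Paraphrased sketch of include/linux/processor.h around this time;
	 * generic fallbacks shown, the exact upstream text may differ.
	 */
	#ifndef spin_begin
	#define spin_begin()			/* arch hook: enter busy-wait state */
	#endif
	#ifndef spin_cpu_relax
	#define spin_cpu_relax()	cpu_relax()
	#endif
	#ifndef spin_end
	#define spin_end()			/* arch hook: leave busy-wait state */
	#endif

	#define spin_until_cond(cond)					\
	do {								\
		if (unlikely(!(cond))) {				\
			spin_begin();					\
			do {						\
				spin_cpu_relax();	/* poll with loads only */ \
			} while (!(cond));				\
			spin_end();					\
		}							\
	} while (0)

The effect of the change is that a contended waiter now only issues loads until the lock word (or busy count) is observed clear, instead of retrying atomic_cmpxchg() in a cpu_relax() loop and bouncing the lock cacheline between CPUs.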
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/smp.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index cf0e1245b8cc..8d3320562c70 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 	hard_irq_disable();
 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 		raw_local_irq_restore(*flags);
-		cpu_relax();
+		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 		raw_local_irq_save(*flags);
 		hard_irq_disable();
 	}
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 static void nmi_ipi_lock(void)
 {
 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-		cpu_relax();
+		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 	nmi_ipi_lock_start(&flags);
 	while (nmi_ipi_busy_count) {
 		nmi_ipi_unlock_end(&flags);
-		cpu_relax();
+		spin_until_cond(nmi_ipi_busy_count == 0);
 		nmi_ipi_lock_start(&flags);
 	}
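On powerpc, the arch hooks are what give the "low SMT priority, using loads only" behaviour described in the commit message. A hedged sketch of the asm/processor.h definitions assumed here (approximate; check the tree for the exact kernel version):

	/* Approximate powerpc spin-loop hooks, arch/powerpc/include/asm/processor.h. */
	#define spin_begin()		HMT_low()	/* lower SMT thread priority while polling */
	#define spin_cpu_relax()	barrier()	/* loop body is a plain load, no atomics */
	#define spin_end()		HMT_medium()	/* restore normal priority once the condition holds */

Lowering SMT priority hands execution resources to sibling hardware threads while a CPU waits, which matters most in the crash/debugger scenario the commit message mentions, where many CPUs pile onto the NMI IPI lock at once.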