author    | Max Filippov <jcmvbkbc@gmail.com> | 2019-01-19 03:45:17 +0100
committer | Max Filippov <jcmvbkbc@gmail.com> | 2019-02-07 21:16:17 +0100
commit    | 815af8fff1d120f0c7ab04e7914270508984a04d (patch)
tree      | 8abd662c75ed06b3ab746d1cd7ee8e4dbd13bba9 /arch/xtensa
parent    | xtensa: document boot parameter passing (diff)
xtensa: SMP: rework IPI processing
Don't skip the current CPU in send_ipi_message: callers of this function
take care of that, and it's harmless anyway.
Don't clear IPI bits one by one; clear all bits that were read at once.
Check the IPI register in a loop, in case a new IPI was posted while the
previous one was being handled.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
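For illustration, here is a minimal, self-contained C sketch of the read-and-clear loop this patch introduces. The names mock_mipicause, post_ipi and the IPI_* numbering below are illustrative stand-ins, not kernel code: a C11 atomic models the per-CPU MIPICAUSE cause register, and atomic_exchange stands in for the patch's get_er() read followed by a write-1-to-clear set_er() of the same bits. The point is that the handler fetches and clears the whole pending mask at once, dispatches every set bit, and repeats until the register reads zero, so an IPI posted mid-handling is picked up on the next iteration instead of being dropped.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the xtensa IPI message numbering. */
enum { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_MAX };

static atomic_uint mock_mipicause;        /* mock cause register: one pending bit per message */
static unsigned int ipi_count[IPI_MAX];   /* per-message counters, as in struct ipi_data */

static void post_ipi(int msg)             /* sender side: set the pending bit */
{
	atomic_fetch_or(&mock_mipicause, 1u << msg);
}

static void ipi_interrupt(void)           /* receiver side: drain pending IPIs in a loop */
{
	for (;;) {
		/* Fetch and clear the whole pending mask in one step. */
		unsigned int msg = atomic_exchange(&mock_mipicause, 0);

		if (!msg)
			break;            /* nothing left pending */

		if (msg & (1u << IPI_CALL_FUNC))
			++ipi_count[IPI_CALL_FUNC];
		if (msg & (1u << IPI_RESCHEDULE))
			++ipi_count[IPI_RESCHEDULE];
		if (msg & (1u << IPI_CPU_STOP))
			++ipi_count[IPI_CPU_STOP];
		/* Any IPI posted while this batch was handled is seen on the
		 * next loop iteration rather than being lost. */
	}
}

int main(void)
{
	post_ipi(IPI_RESCHEDULE);
	post_ipi(IPI_CALL_FUNC);
	ipi_interrupt();
	printf("resched=%u call=%u stop=%u\n",
	       ipi_count[IPI_RESCHEDULE], ipi_count[IPI_CALL_FUNC],
	       ipi_count[IPI_CPU_STOP]);
	return 0;
}

The loop-until-empty structure mirrors the for (;;) / if (!msg) break pattern in the actual patch below; only the register access is mocked.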
Diffstat (limited to 'arch/xtensa')
-rw-r--r-- | arch/xtensa/kernel/smp.c | 38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index be1f280c322c..3699d6d3e479 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -372,8 +372,7 @@ static void send_ipi_message(const struct cpumask *callmask,
 	unsigned long mask = 0;
 
 	for_each_cpu(index, callmask)
-		if (index != smp_processor_id())
-			mask |= 1 << index;
+		mask |= 1 << index;
 
 	set_er(mask, MIPISET(msg_id));
 }
@@ -412,22 +411,31 @@ irqreturn_t ipi_interrupt(int irq, void *dev_id)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-	unsigned int msg;
-	unsigned i;
 
-	msg = get_er(MIPICAUSE(cpu));
-	for (i = 0; i < IPI_MAX; i++)
-		if (msg & (1 << i)) {
-			set_er(1 << i, MIPICAUSE(cpu));
-			++ipi->ipi_count[i];
+	for (;;) {
+		unsigned int msg;
+
+		msg = get_er(MIPICAUSE(cpu));
+		set_er(msg, MIPICAUSE(cpu));
+
+		if (!msg)
+			break;
+
+		if (msg & (1 << IPI_CALL_FUNC)) {
+			++ipi->ipi_count[IPI_CALL_FUNC];
+			generic_smp_call_function_interrupt();
 		}
 
-	if (msg & (1 << IPI_RESCHEDULE))
-		scheduler_ipi();
-	if (msg & (1 << IPI_CALL_FUNC))
-		generic_smp_call_function_interrupt();
-	if (msg & (1 << IPI_CPU_STOP))
-		ipi_cpu_stop(cpu);
+		if (msg & (1 << IPI_RESCHEDULE)) {
+			++ipi->ipi_count[IPI_RESCHEDULE];
+			scheduler_ipi();
+		}
+
+		if (msg & (1 << IPI_CPU_STOP)) {
+			++ipi->ipi_count[IPI_CPU_STOP];
+			ipi_cpu_stop(cpu);
+		}
+	}
 
 	return IRQ_HANDLED;
 }