author     Peter Xu <peterx@redhat.com>    2019-12-16 22:31:24 +0100
committer  Ingo Molnar <mingo@kernel.org>  2020-03-06 13:42:28 +0100
commit     e188f0a50f637391f440b9bf0a1066db71a20889 (patch)
tree       c6a22e5e9f5d6dff2663d19cf402767eafa6d421
parent     smp: Allow smp_call_function_single_async() to insert locked csd (diff)
MIPS: smp: Remove tick_broadcast_count
Now that smp_call_function_single_async() provides its own protection,
returning -EBUSY while the csd object is still pending, the
tick_broadcast_count counter is no longer needed; remove it.
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20191216213125.9536-3-peterx@redhat.com
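As a hedged illustration (not part of this patch; the kick_csd / kick_cpu
names and the surrounding helpers are made up), the contract the changelog
relies on lets a caller with an idempotent cross-CPU callback simply drop an
-EBUSY return, since it only means the previous request has not run yet:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU csd, set up once at boot like tick_broadcast_csd. */
static DEFINE_PER_CPU(call_single_data_t, kick_csd);

static void kick_callee(void *info)
{
	/* runs on the target CPU once the IPI has been delivered */
}

static int __init kick_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(kick_csd, cpu).func = kick_callee;
	return 0;
}
early_initcall(kick_init);

static void kick_cpu(int cpu)
{
	int ret;

	/*
	 * -EBUSY only means the previous kick has not run yet; for an
	 * idempotent IPI the duplicate can be dropped, which is why
	 * tick_broadcast() in the diff below no longer needs its own
	 * tick_broadcast_count guard.
	 */
	ret = smp_call_function_single_async(cpu, &per_cpu(kick_csd, cpu));
	WARN_ON_ONCE(ret && ret != -EBUSY);
}

The diff below is this same simplification applied to the MIPS tick
broadcast path.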
-rw-r--r--  arch/mips/kernel/smp.c | 9
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index f510c00bda88..0def6242b3ea 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -696,29 +696,22 @@ EXPORT_SYMBOL(flush_tlb_one);
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 
-static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
 static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
 
 void tick_broadcast(const struct cpumask *mask)
 {
-	atomic_t *count;
 	call_single_data_t *csd;
 	int cpu;
 
 	for_each_cpu(cpu, mask) {
-		count = &per_cpu(tick_broadcast_count, cpu);
 		csd = &per_cpu(tick_broadcast_csd, cpu);
-
-		if (atomic_inc_return(count) == 1)
-			smp_call_function_single_async(cpu, csd);
+		smp_call_function_single_async(cpu, csd);
 	}
 }
 
 static void tick_broadcast_callee(void *info)
 {
-	int cpu = smp_processor_id();
 	tick_receive_broadcast();
-	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
 }
 
 static int __init tick_broadcast_init(void)