author     Peter Zijlstra <peterz@infradead.org>    2021-09-21 22:16:02 +0200
committer  Peter Zijlstra <peterz@infradead.org>    2021-10-07 13:51:15 +0200
commit     8850cb663b5cda04d33f9cfbc38889d73d3c8e24 (patch)
tree       b928705d29ba099d985c9d2090a66ca1d28ed052 /kernel
parent     sched,livepatch: Use task_call_func() (diff)
sched: Simplify wake_up_*idle*()
Simplify and make wake_up_if_idle() more robust; also, don't iterate
the whole machine with preempt_disable() in its caller:
wake_up_all_idle_cpus().

This prepares for another wake_up_if_idle() user that needs a full
do_idle() cycle.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Tested-by: Vasily Gorbik <gor@linux.ibm.com> # on s390
Link: https://lkml.kernel.org/r/20210929152428.769328779@infradead.org
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	14
-rw-r--r--	kernel/smp.c	6
2 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 74db3c3afa6d..3b55ef99c03f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3695,15 +3695,11 @@ void wake_up_if_idle(int cpu)
 	if (!is_idle_task(rcu_dereference(rq->curr)))
 		goto out;
 
-	if (set_nr_if_polling(rq->idle)) {
-		trace_sched_wake_idle_without_ipi(cpu);
-	} else {
-		rq_lock_irqsave(rq, &rf);
-		if (is_idle_task(rq->curr))
-			smp_send_reschedule(cpu);
-		/* Else CPU is not idle, do nothing here: */
-		rq_unlock_irqrestore(rq, &rf);
-	}
+	rq_lock_irqsave(rq, &rf);
+	if (is_idle_task(rq->curr))
+		resched_curr(rq);
+	/* Else CPU is not idle, do nothing here: */
+	rq_unlock_irqrestore(rq, &rf);
 
 out:
 	rcu_read_unlock();
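
Taken together with the unchanged context around this hunk, the post-patch function reads roughly as follows. This is a sketch, not the authoritative source: the cpu_rq()/rq_flags setup at the top is not part of the hunk and is assumed from the surrounding code in kernel/sched/core.c.

/* Sketch of wake_up_if_idle() as it looks after this patch. */
void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rcu_read_lock();

	/* Lockless peek first: if the CPU isn't running its idle task, bail. */
	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	/*
	 * Re-check under the rq lock; resched_curr() already handles the
	 * polling-idle vs. IPI distinction, so the open-coded
	 * set_nr_if_polling()/smp_send_reschedule() branch is no longer needed.
	 */
	rq_lock_irqsave(rq, &rf);
	if (is_idle_task(rq->curr))
		resched_curr(rq);
	/* Else CPU is not idle, do nothing here: */
	rq_unlock_irqrestore(rq, &rf);

out:
	rcu_read_unlock();
}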
diff --git a/kernel/smp.c b/kernel/smp.c
index f43ede0ab183..ad0b68a3a3d3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -1170,14 +1170,14 @@ void wake_up_all_idle_cpus(void)
 {
 	int cpu;
 
-	preempt_disable();
+	cpus_read_lock();
 	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
+		if (cpu == raw_smp_processor_id())
 			continue;
 
 		wake_up_if_idle(cpu);
 	}
-	preempt_enable();
+	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
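
wake_up_all_idle_cpus() remains a GPL-only export; the typical caller pattern is to publish some condition the idle loop must notice and then kick every idle CPU so each one runs through do_idle() again (cpuidle's latency-QoS notifier is one such user). A minimal, hypothetical caller might look like the sketch below; example_publish_idle_constraint() and the stored constraint are illustrative only and not part of this patch.

/* Hypothetical caller, for illustration only. */
static void example_publish_idle_constraint(void)
{
	/* ... make the new constraint visible to the idle path ... */

	/*
	 * Kick every idle CPU so it re-enters do_idle() and re-evaluates
	 * the constraint. After this patch the iteration runs under
	 * cpus_read_lock() rather than preempt_disable(), so the online
	 * mask stays stable without disabling preemption for the whole loop.
	 */
	wake_up_all_idle_cpus();
}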