author     Peter Zijlstra <peterz@infradead.org>  2021-03-09 09:42:09 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2021-03-17 16:33:57 +0100
commit     697d8c63c4a2991a22a896a5e6adcdbb28fefe56 (patch)
tree       223efa50042365abc94dcbec5287e94c94773aa0 /kernel/softirq.c
parent     tasklets: Replace spin wait in tasklet_unlock_wait() (diff)
tasklets: Replace spin wait in tasklet_kill()
tasklet_kill() spin waits for TASKLET_STATE_SCHED to be cleared, invoking
yield() from inside the loop. yield() is an ill-defined mechanism and the
result might still be wasting CPU cycles in a tight loop, which is
especially painful in a guest when the CPU running the tasklet is
scheduled out.

tasklet_kill() is used in teardown paths and is not performance critical
at all. Replace the spin wait with wait_var_event().

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210309084241.890532921@linutronix.de
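For context, here is a minimal sketch of the wait_var_event()/wake_up_var()
pairing this change relies on. It is not taken from the patch: the state word
and demo_* function names are invented for illustration only. The side that
clears the bit wakes any sleeper waiting on the same variable address.

#include <linux/bitops.h>
#include <linux/wait_bit.h>

/* Illustrative only: a stand-in state word with one SCHED-like bit (bit 0). */
static unsigned long demo_state;

/* Consumer side (compare tasklet_clear_sched() in the patch below). */
static void demo_clear_and_wake(void)
{
        if (test_and_clear_bit(0, &demo_state))
                wake_up_var(&demo_state);       /* wake wait_var_event() sleepers */
}

/* Teardown side (compare tasklet_kill() in the patch below): sleep, don't spin. */
static void demo_wait_for_clear(void)
{
        wait_var_event(&demo_state, !test_bit(0, &demo_state));
}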
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c  18
1 file changed, 9 insertions, 9 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ef6429a33883..ba89ca77698a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -532,10 +532,12 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
-static bool tasklet_should_run(struct tasklet_struct *t)
+static bool tasklet_clear_sched(struct tasklet_struct *t)
 {
-        if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+        if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
+                wake_up_var(&t->state);
                 return true;
+        }
 
         WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
                   t->use_callback ? "callback" : "func",
@@ -563,7 +565,7 @@ static void tasklet_action_common(struct softirq_action *a,
 
                 if (tasklet_trylock(t)) {
                         if (!atomic_read(&t->count)) {
-                                if (tasklet_should_run(t)) {
+                                if (tasklet_clear_sched(t)) {
                                         if (t->use_callback)
                                                 t->callback(t);
                                         else
@@ -623,13 +625,11 @@ void tasklet_kill(struct tasklet_struct *t)
         if (in_interrupt())
                 pr_notice("Attempt to kill tasklet from interrupt\n");
 
-        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-                do {
-                        yield();
-                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
-        }
+        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+                wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
+
         tasklet_unlock_wait(t);
-        clear_bit(TASKLET_STATE_SCHED, &t->state);
+        tasklet_clear_sched(t);
 }
 EXPORT_SYMBOL(tasklet_kill);
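As a usage note, below is a hypothetical driver teardown path (the foo_* names
are invented, not from this patch) showing the kind of call site tasklet_kill()
serves: it runs in process context, so sleeping in wait_var_event() instead of
spinning with yield() is acceptable there.

#include <linux/interrupt.h>

struct foo_dev {
        struct tasklet_struct tl;
};

static void foo_tasklet_fn(struct tasklet_struct *t)
{
        /* deferred work would run here in softirq context */
}

static void foo_init(struct foo_dev *foo)
{
        tasklet_setup(&foo->tl, foo_tasklet_fn);
}

static void foo_teardown(struct foo_dev *foo)
{
        /* With this change, sleeps (rather than spins) until any pending run completes. */
        tasklet_kill(&foo->tl);
}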