author     Thomas Gleixner <tglx@linutronix.de>   2020-07-30 12:14:05 +0200
committer  Ingo Molnar <mingo@kernel.org>         2020-08-06 16:50:59 +0200
commit     820903c784a01bf6e143253418508da4f5790cff
tree       aa5b70fb94aaf53b5c744673ce02d976f4a22ca4
parent     Merge tag 'x86-fsgsbase-2020-08-04' of git://git.kernel.org/pub/scm/linux/ker...
posix-cpu-timers: Split run_posix_cpu_timers()
Split it up as a preparatory step to move the heavy lifting out of
interrupt context.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200730102337.677439437@linutronix.de
-rw-r--r--  kernel/time/posix-cpu-timers.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 165117996ea0..e5ad87320468 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1080,32 +1080,15 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
 	return false;
 }
 
-/*
- * This is called from the timer interrupt handler. The irq handler has
- * already updated our counts. We need to check if any timers fire now.
- * Interrupts are disabled.
- */
-void run_posix_cpu_timers(void)
+static void __run_posix_cpu_timers(struct task_struct *tsk)
 {
-	struct task_struct *tsk = current;
 	struct k_itimer *timer, *next;
 	unsigned long flags;
 	LIST_HEAD(firing);
 
-	lockdep_assert_irqs_disabled();
-
-	/*
-	 * The fast path checks that there are no expired thread or thread
-	 * group timers. If that's so, just return.
-	 */
-	if (!fastpath_timer_check(tsk))
+	if (!lock_task_sighand(tsk, &flags))
 		return;
 
-	lockdep_posixtimer_enter();
-	if (!lock_task_sighand(tsk, &flags)) {
-		lockdep_posixtimer_exit();
-		return;
-	}
 	/*
 	 * Here we take off tsk->signal->cpu_timers[N] and
 	 * tsk->cpu_timers[N] all the timers that are firing, and
@@ -1147,6 +1130,28 @@ void run_posix_cpu_timers(void)
 		cpu_timer_fire(timer);
 		spin_unlock(&timer->it_lock);
 	}
+}
+
+/*
+ * This is called from the timer interrupt handler. The irq handler has
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+void run_posix_cpu_timers(void)
+{
+	struct task_struct *tsk = current;
+
+	lockdep_assert_irqs_disabled();
+
+	/*
+	 * The fast path checks that there are no expired thread or thread
+	 * group timers. If that's so, just return.
+	 */
+	if (!fastpath_timer_check(tsk))
+		return;
+
+	lockdep_posixtimer_enter();
+	__run_posix_cpu_timers(tsk);
 	lockdep_posixtimer_exit();
 }
 
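
As a reading aid (not part of the commit), here is a sketch of how the two functions read once the patch is applied, assembled from the hunks above. The expired-timer collection and firing loop that sits between the two hunks is untouched by the patch and elided here.

static void __run_posix_cpu_timers(struct task_struct *tsk)
{
	struct k_itimer *timer, *next;
	unsigned long flags;
	LIST_HEAD(firing);

	/* Heavy path: everything behind the sighand lock lives here now. */
	if (!lock_task_sighand(tsk, &flags))
		return;

	/*
	 * ... unchanged code elided: move expired per-thread and
	 * thread-group timers onto the local "firing" list under
	 * sighand lock, drop the lock, then call cpu_timer_fire()
	 * on each entry ...
	 */
}

/*
 * Called from the timer interrupt handler with interrupts disabled.
 * Only the cheap fastpath check remains in the interrupt path; the
 * heavy lifting is confined to __run_posix_cpu_timers().
 */
void run_posix_cpu_timers(void)
{
	struct task_struct *tsk = current;

	lockdep_assert_irqs_disabled();

	/* Fast path: no expired thread or thread group timers, done. */
	if (!fastpath_timer_check(tsk))
		return;

	lockdep_posixtimer_enter();
	__run_posix_cpu_timers(tsk);
	lockdep_posixtimer_exit();
}

The point of the split is that the interrupt entry now only pays for fastpath_timer_check(); all work requiring lock_task_sighand() is isolated in the new helper, which the changelog flags as the piece to be moved out of interrupt context by later patches.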