author     Thomas Gleixner <tglx@linutronix.de>   2023-12-01 10:26:33 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2023-12-20 16:49:39 +0100
commit     bb8caad5083f8fbba70faf41f1d3bab7cf09da6d (patch)
tree       75128a304b02577d33a6fc2110ed011565ffa138
parent     timers: Use already existing function for forwarding timer base (diff)
download   linux-bb8caad5083f8fbba70faf41f1d3bab7cf09da6d.tar.xz
           linux-bb8caad5083f8fbba70faf41f1d3bab7cf09da6d.zip
timers: Rework idle logic
To improve readability of the code, split the base->idle calculation and the expires calculation into separate parts. While at it, update the comment about marking the timer base idle.

This introduces one subtle change in behavior when the next event is just one jiffy ahead and the tick was already stopped: originally base->is_idle remained true in this situation; now base->is_idle turns false. This may spare an IPI if a timer is enqueued remotely to an idle CPU that is going to tick on the next jiffy anyway.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20231201092654.34614-12-anna-maria@linutronix.de
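
The subtle change is easiest to see in isolation. The following standalone sketch is not kernel code: it is plain userspace C with simplified stand-ins for the kernel's time_after()/time_before_eq() jiffies helpers and made-up values for basej, and it contrasts the old and the new base->is_idle decision when the next timer is exactly one jiffy ahead and the tick is already stopped.

/*
 * Standalone illustration (not kernel code): contrast the old and the new
 * base->is_idle decision when the next timer is exactly one jiffy ahead
 * and the tick is already stopped (base->is_idle currently true).
 */
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's jiffies comparison helpers. */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before_eq(a, b)	((long)((a) - (b)) <= 0)

int main(void)
{
	unsigned long basej = 1000;		/* current jiffies (made-up)    */
	unsigned long nextevt = basej + 1;	/* next timer: one jiffy ahead  */
	bool is_idle = true;			/* tick already stopped         */

	/*
	 * Old logic: nextevt is in the future, so the "expired" branch is not
	 * taken; expires - basem equals exactly TICK_NSEC, so the
	 * "> TICK_NSEC" check fails and is_idle is left untouched (true).
	 */
	bool old_is_idle = is_idle;
	if (time_before_eq(nextevt, basej))
		old_is_idle = false;

	/* New logic: idle only if the next event is more than a tick away. */
	bool new_is_idle = time_after(nextevt, basej + 1);

	printf("old is_idle: %d, new is_idle: %d\n", old_is_idle, new_is_idle);
	return 0;
}

Compiled and run, the old path leaves is_idle true while the new condition yields false, which is exactly the case where the rework can spare the remote-enqueue IPI described above.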
-rw-r--r--  kernel/time/timer.c | 40
1 file changed, 20 insertions, 20 deletions
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 1a73d396101b..cf51655add64 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1924,6 +1924,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
u64 expires = KTIME_MAX;
unsigned long nextevt;
+ bool was_idle;
/*
* Pretend that there is no timer pending if the cpu is offline.
@@ -1943,27 +1944,26 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
*/
__forward_timer_base(base, basej);
- if (time_before_eq(nextevt, basej)) {
- expires = basem;
- if (base->is_idle) {
- base->is_idle = false;
- trace_timer_base_idle(false, base->cpu);
- }
- } else {
- if (base->timers_pending)
- expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
- /*
- * If we expect to sleep more than a tick, mark the base idle.
- * Also the tick is stopped so any added timer must forward
- * the base clk itself to keep granularity small. This idle
- * logic is only maintained for the BASE_STD base, deferrable
- * timers may still see large granularity skew (by design).
- */
- if ((expires - basem) > TICK_NSEC && !base->is_idle) {
- base->is_idle = true;
- trace_timer_base_idle(true, base->cpu);
- }
+ if (base->timers_pending) {
+ /* If we missed a tick already, force 0 delta */
+ if (time_before(nextevt, basej))
+ nextevt = basej;
+ expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
}
+
+ /*
+ * Base is idle if the next event is more than a tick away.
+ *
+ * If the base is marked idle then any timer add operation must forward
+ * the base clk itself to keep granularity small. This idle logic is
+ * only maintained for the BASE_STD base, deferrable timers may still
+ * see large granularity skew (by design).
+ */
+ was_idle = base->is_idle;
+ base->is_idle = time_after(nextevt, basej + 1);
+ if (was_idle != base->is_idle)
+ trace_timer_base_idle(base->is_idle, base->cpu);
+
raw_spin_unlock(&base->lock);
return cmp_next_hrtimer_event(basem, expires);
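
For reference, the reworked tail of get_next_timer_interrupt() can also be modelled outside the kernel. This sketch is a simplified userspace re-implementation, not the kernel function: TICK_NSEC, MODEL_KTIME_MAX and the model_next_event() helper are made up for illustration. It shows how the expires calculation and the idle decision are now decoupled, including the clamp that forces a zero delta when a tick was already missed.

/*
 * Simplified userspace model of the reworked expiry/idle split
 * (assumption: not the kernel function itself).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC	1000000ULL	/* assumed 1 ms tick for the model */
#define MODEL_KTIME_MAX	INT64_MAX

#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	((long)((a) - (b)) < 0)

struct next_event {
	uint64_t expires;
	bool	 is_idle;
};

static struct next_event model_next_event(unsigned long basej, uint64_t basem,
					  unsigned long nextevt,
					  bool timers_pending)
{
	struct next_event r = { .expires = MODEL_KTIME_MAX, .is_idle = false };

	if (timers_pending) {
		/* If we missed a tick already, force 0 delta */
		if (time_before(nextevt, basej))
			nextevt = basej;

		r.expires = basem + (uint64_t)(nextevt - basej) * TICK_NSEC;
	}

	/* Idle only if the next event is more than a tick away. */
	r.is_idle = time_after(nextevt, basej + 1);
	return r;
}

int main(void)
{
	/* Missed tick: nextevt is already behind basej -> expires == basem. */
	struct next_event r = model_next_event(1000, 5000000, 998, true);

	printf("expires=%llu is_idle=%d\n",
	       (unsigned long long)r.expires, r.is_idle);
	return 0;
}

In the missed-tick case the model clamps nextevt to basej, so expires collapses to basem and the base is not marked idle, mirroring the "force 0 delta" comment in the patch.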