author:    Anna-Maria Behnsen <anna-maria@linutronix.de>  2024-02-21 10:05:40 +0100
committer: Thomas Gleixner <tglx@linutronix.de>           2024-02-22 17:52:31 +0100
commit:    70b4cf84f3acd9e72c9ea9064d82577b6f29a60b (patch)
tree:      83c7a737114dd199151c8c1d2e033a8161eb2755 /kernel
parent:    timers: Retrieve next expiry of pinned/non-pinned timers separately (diff)
timers: Split out "get next timer interrupt" functionality
The functionality for getting the next timer interrupt in
get_next_timer_interrupt() is split into a separate function
fetch_next_timer_interrupt() to be usable by other call sites.

This is preparatory work for the conversion of the NOHZ timer
placement to a pull at expiry time model.

No functional change.

Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240221090548.36600-13-anna-maria@linutronix.de
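
The shape of this change is the common "split the computation out of its
setup-and-locking wrapper" refactoring. Below is a minimal, self-contained C
sketch of that pattern, for orientation only: the types, constants and the
names fetch_next_event()/get_next_event() are simplified stand-ins invented
for this sketch, not the kernel's actual definitions (see the diff below for
those).

#include <stdbool.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct timer_base {
        unsigned long next_expiry;      /* next pending event, in jiffies */
        bool timers_pending;
};

struct timer_events {
        unsigned long long local;       /* next local event, absolute ns */
        unsigned long long global;      /* next global event, absolute ns */
};

#define TICK_NSEC 1000000ULL            /* placeholder tick length */
#define KTIME_MAX (~0ULL >> 1)

/*
 * Split-out core: works on bases the caller has already looked up and
 * locked, fills *tevt and returns the earlier expiry in jiffies. This
 * loosely mirrors the role of fetch_next_timer_interrupt(); a plain <
 * stands in for the kernel's jiffies-safe time_before() here.
 */
static unsigned long fetch_next_event(unsigned long basej,
                                      unsigned long long basem,
                                      struct timer_base *local,
                                      struct timer_base *global,
                                      struct timer_events *tevt)
{
        unsigned long nextevt = local->next_expiry < global->next_expiry ?
                                local->next_expiry : global->next_expiry;

        if (local->timers_pending)
                tevt->local = basem +
                        (unsigned long long)(local->next_expiry - basej) * TICK_NSEC;
        if (global->timers_pending)
                tevt->global = basem +
                        (unsigned long long)(global->next_expiry - basej) * TICK_NSEC;

        return nextevt;
}

/*
 * Wrapper: keeps the environment handling (offline check, base lookup,
 * locking; elided here) and delegates the computation, as
 * __get_next_timer_interrupt() does after this patch.
 */
static unsigned long long get_next_event(unsigned long basej,
                                         unsigned long long basem,
                                         struct timer_base *local,
                                         struct timer_base *global)
{
        struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };

        /* lock both bases here ... */
        fetch_next_event(basej, basem, local, global, &tevt);
        /* ... forward bases and unlock here */

        return tevt.local < tevt.global ? tevt.local : tevt.global;
}

The payoff is that the computation no longer assumes a particular caller:
everything environment-specific stays in the wrapper, so another call site
can reuse the core under its own locking.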
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/timer.c | 64
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 38becd2facee..b10e97c995a7 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2033,30 +2033,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
         return base->next_expiry;
 }
 
-static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
-                                             bool *idle)
+static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
+                                                struct timer_base *base_local,
+                                                struct timer_base *base_global,
+                                                struct timer_events *tevt)
 {
-        struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
         unsigned long nextevt, nextevt_local, nextevt_global;
-        struct timer_base *base_local, *base_global;
         bool local_first;
-        u64 expires;
-
-        /*
-         * Pretend that there is no timer pending if the cpu is offline.
-         * Possible pending timers will be migrated later to an active cpu.
-         */
-        if (cpu_is_offline(smp_processor_id())) {
-                if (idle)
-                        *idle = true;
-                return tevt.local;
-        }
-
-        base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
-        base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
-
-        raw_spin_lock(&base_local->lock);
-        raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
 
         nextevt_local = next_timer_interrupt(base_local, basej);
         nextevt_global = next_timer_interrupt(base_global, basej);
@@ -2074,8 +2057,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
                 /* If we missed a tick already, force 0 delta */
                 if (time_before(nextevt, basej))
                         nextevt = basej;
-                tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC;
-                goto forward;
+                tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
+                return nextevt;
         }
 
         /*
@@ -2085,12 +2068,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
          * ignored. If the global queue is empty, nothing to do either.
          */
         if (!local_first && base_global->timers_pending)
-                tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
+                tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
 
         if (base_local->timers_pending)
-                tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
+                tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
+
+        return nextevt;
+}
+
+static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
+                                             bool *idle)
+{
+        struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
+        struct timer_base *base_local, *base_global;
+        unsigned long nextevt;
+        u64 expires;
+
+        /*
+         * Pretend that there is no timer pending if the cpu is offline.
+         * Possible pending timers will be migrated later to an active cpu.
+         */
+        if (cpu_is_offline(smp_processor_id())) {
+                if (idle)
+                        *idle = true;
+                return tevt.local;
+        }
+
+        base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
+        base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
+
+        raw_spin_lock(&base_local->lock);
+        raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
+
+        nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
+                                             base_global, &tevt);
 
-forward:
         /*
          * We have a fresh next event. Check whether we can forward the
          * base.