author		Yunfeng Ye <yeyunfeng@huawei.com>	2020-11-17 14:19:46 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2020-11-19 10:48:29 +0100
commit		94ad2e3cedb82af034f6d97c58022f162b669f9b (patch)
tree		aaa1c3966d314e14a15da94422c0ae1134026a2f /kernel/time
parent		tick/sched: Use tick_next_period for lockless quick check (diff)
tick/sched: Reduce seqcount held scope in tick_do_update_jiffies64()
If jiffies are up to date already (caller lost the race against another
CPU) there is no point to change the sequence count. Doing that just
forces other CPUs into the seqcount retry loop in tick_nohz_next_event()
for nothing. Just bail out early.

[ tglx: Rewrote most of it ]

Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20201117132006.462195901@linutronix.de
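The pattern the patch applies (a lockless quick check, a recheck under
the lock, and an early bailout before the seqcount write section begins)
can be sketched in portable C11. The sketch below is illustrative only,
not kernel code: the names update_period, PERIOD_NS, next_period and
period_base are invented for the example, and a pthread mutex plus C11
atomics stand in for jiffies_lock and jiffies_seq.

#include <pthread.h>
#include <stdatomic.h>

#define PERIOD_NS 1000000L	/* made-up tick length */

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint seq;			/* even: stable, odd: update in progress */
static _Atomic long next_period;	/* read locklessly by the quick check */
static _Atomic long period_base;	/* the state the sequence count protects */

static void update_period(long now)
{
	/* Lockless quick check; pairs with the release store below */
	if (now < atomic_load_explicit(&next_period, memory_order_acquire))
		return;

	pthread_mutex_lock(&update_lock);

	/*
	 * Reevaluate with the lock held. If another thread won the race,
	 * return WITHOUT touching seq, so concurrent readers are not sent
	 * through their retry loop for nothing; this is the point of the
	 * patch.
	 */
	if (now < atomic_load_explicit(&next_period, memory_order_relaxed)) {
		pthread_mutex_unlock(&update_lock);
		return;
	}

	/* Only one writer can be here (the mutex is held) */
	unsigned int s = atomic_load_explicit(&seq, memory_order_relaxed);
	atomic_store_explicit(&seq, s + 1, memory_order_relaxed);	/* odd */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&period_base, now, memory_order_relaxed);
	atomic_store_explicit(&next_period, now + PERIOD_NS,
			      memory_order_release);

	atomic_store_explicit(&seq, s + 2, memory_order_release);	/* even */
	pthread_mutex_unlock(&update_lock);
}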
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/tick-sched.c | 47
1 file changed, 22 insertions(+), 25 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b4b6abc81e4a..ca9191ced4b5 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -86,38 +86,35 @@ static void tick_do_update_jiffies64(ktime_t now)
 
 	/* Reevaluate with jiffies_lock held */
 	raw_spin_lock(&jiffies_lock);
+	if (ktime_before(now, tick_next_period)) {
+		raw_spin_unlock(&jiffies_lock);
+		return;
+	}
+
 	write_seqcount_begin(&jiffies_seq);
 
-	delta = ktime_sub(now, last_jiffies_update);
-	if (delta >= tick_period) {
+	last_jiffies_update = ktime_add(last_jiffies_update, tick_period);
 
-		delta = ktime_sub(delta, tick_period);
-		last_jiffies_update = ktime_add(last_jiffies_update,
-						tick_period);
+	delta = ktime_sub(now, tick_next_period);
+	if (unlikely(delta >= tick_period)) {
+		/* Slow path for long idle sleep times */
+		s64 incr = ktime_to_ns(tick_period);
 
-		/* Slow path for long timeouts */
-		if (unlikely(delta >= tick_period)) {
-			s64 incr = ktime_to_ns(tick_period);
+		ticks = ktime_divns(delta, incr);
 
-			ticks = ktime_divns(delta, incr);
+		last_jiffies_update = ktime_add_ns(last_jiffies_update,
+						   incr * ticks);
+	}
 
-			last_jiffies_update = ktime_add_ns(last_jiffies_update,
-							   incr * ticks);
-		}
-		do_timer(++ticks);
+	do_timer(++ticks);
+
+	/*
+	 * Keep the tick_next_period variable up to date. WRITE_ONCE()
+	 * pairs with the READ_ONCE() in the lockless quick check above.
+	 */
+	WRITE_ONCE(tick_next_period,
+		   ktime_add(last_jiffies_update, tick_period));
 
-		/*
-		 * Keep the tick_next_period variable up to date.
-		 * WRITE_ONCE() pairs with the READ_ONCE() in the lockless
-		 * quick check above.
-		 */
-		WRITE_ONCE(tick_next_period,
-			   ktime_add(last_jiffies_update, tick_period));
-	} else {
-		write_seqcount_end(&jiffies_seq);
-		raw_spin_unlock(&jiffies_lock);
-		return;
-	}
 	write_seqcount_end(&jiffies_seq);
 	raw_spin_unlock(&jiffies_lock);
 	update_wall_time();
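The reader half of the same sketch mirrors the seqcount retry loop in
tick_nohz_next_event() that the early bailout spares: a reader retries
only while the sequence count is odd or has changed under it, so a
writer that bails out without bumping the count never disturbs readers.
Again illustrative only, using the invented names from the sketch above.

/* Reader half of the sketch: retry while an update is in flight */
static long read_period(void)
{
	unsigned int s;
	long v;

	do {
		/* Wait for an even count: no update in progress */
		while ((s = atomic_load_explicit(&seq,
						 memory_order_acquire)) & 1)
			;
		v = atomic_load_explicit(&period_base, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
	} while (atomic_load_explicit(&seq, memory_order_relaxed) != s);

	return v;
}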