commit d375b4e0fa3771343b370be0d876a1963c02e0a0
Author:    Peter Zijlstra <peterz@infradead.org>    2014-01-22 12:59:18 +0100
Committer: Ingo Molnar <mingo@kernel.org>           2014-01-23 14:48:36 +0100
Tree:      21a015cf741235bb4a74d4ecae0e3d0b1cd4c32a /kernel/sched/clock.c
Parent:    sched/preempt/x86: Fix voluntary preempt for x86
sched/clock: Fixup early initialization
The code would assume sched_clock_stable() and switch to !stable
later; this switch brings a discontinuity in time.
The discontinuity on switching from stable to unstable was always
present, but previously we would set stable/unstable before
initializing TSC and usually stick to the one we start out with.
So the static_key bits brought an extra switch where there previously
wasn't one.
Things are further complicated by the fact that we cannot use
static_key as early as we usually call set_sched_clock_stable().
Fix things by tracking the stable state in a regular variable and only
setting the static_key to the right state in sched_clock_init(), which
is run right after late_time_init->tsc_init().
Before this we would not be using the TSC anyway.
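The ordering argument above can be sketched in plain C11. The sketch
below is not the kernel code: clock_stable_key, stable_early and
clock_running are hypothetical stand-ins for __sched_clock_stable,
__sched_clock_stable_early and sched_clock_running, and
atomic_thread_fence() stands in for smp_mb(). Either set_clock_stable()
observes clock_running and flips the key itself, or clock_init()
observes stable_early and flips it; the key update cannot be lost.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int clock_stable_key;	/* models __sched_clock_stable       */
static atomic_int stable_early;		/* models __sched_clock_stable_early */
static atomic_int clock_running;	/* models sched_clock_running        */

void set_clock_stable(void)		/* models set_sched_clock_stable()   */
{
	atomic_store(&stable_early, 1);

	/* Full fence; pairs with the fence in clock_init() below. */
	atomic_thread_fence(memory_order_seq_cst);

	if (!atomic_load(&clock_running))
		return;			/* too early: clock_init() will do it */

	atomic_store(&clock_stable_key, 1);
}

void clock_init(void)			/* models sched_clock_init()         */
{
	atomic_store(&clock_running, 1);

	/*
	 * Pairs with set_clock_stable(): either it saw clock_running and
	 * already flipped the key, or we see its stable_early here, or
	 * both; the update can never fall through the cracks.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	atomic_store(&clock_stable_key, atomic_load(&stable_early));
}

int main(void)
{
	set_clock_stable();	/* may run before clock_init()            */
	clock_init();		/* the "key" is flipped once, at init time */
	printf("stable key: %d\n", atomic_load(&clock_stable_key));
	return 0;
}

Note that the patch also inverts the key's sense: sched_clock_stable()
now returns static_key_false(&__sched_clock_stable) directly, so the key
starts out !stable and is only switched to stable at init time, once the
TSC state is actually known.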
Reported-and-Tested-by: Sasha Levin <sasha.levin@oracle.com>
Reported-by: dyoung@redhat.com
Fixes: 35af99e646c7 ("sched/clock, x86: Use a static_key for sched_clock_stable")
Cc: jacob.jun.pan@linux.intel.com
Cc: Mike Galbraith <bitbucket@online.de>
Cc: hpa@zytor.com
Cc: paulmck@linux.vnet.ibm.com
Cc: John Stultz <john.stultz@linaro.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: lenb@kernel.org
Cc: rjw@rjwysocki.net
Cc: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Cc: rui.zhang@intel.com
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140122115918.GG3694@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/clock.c')
 kernel/sched/clock.c | 53 +++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 6bd6a6731b21..43c2bcc35761 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -77,35 +77,50 @@ __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+static int __sched_clock_stable_early;
 
 int sched_clock_stable(void)
 {
-	if (static_key_false(&__sched_clock_stable))
-		return false;
-	return true;
+	return static_key_false(&__sched_clock_stable);
 }
 
-void set_sched_clock_stable(void)
+static void __set_sched_clock_stable(void)
 {
 	if (!sched_clock_stable())
-		static_key_slow_dec(&__sched_clock_stable);
+		static_key_slow_inc(&__sched_clock_stable);
+}
+
+void set_sched_clock_stable(void)
+{
+	__sched_clock_stable_early = 1;
+
+	smp_mb(); /* matches sched_clock_init() */
+
+	if (!sched_clock_running)
+		return;
+
+	__set_sched_clock_stable();
 }
 
 static void __clear_sched_clock_stable(struct work_struct *work)
 {
 	/* XXX worry about clock continuity */
 	if (sched_clock_stable())
-		static_key_slow_inc(&__sched_clock_stable);
+		static_key_slow_dec(&__sched_clock_stable);
 }
 
 static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
 
 void clear_sched_clock_stable(void)
 {
-	if (keventd_up())
-		schedule_work(&sched_clock_work);
-	else
-		__clear_sched_clock_stable(&sched_clock_work);
+	__sched_clock_stable_early = 0;
+
+	smp_mb(); /* matches sched_clock_init() */
+
+	if (!sched_clock_running)
+		return;
+
+	schedule_work(&sched_clock_work);
 }
 
 struct sched_clock_data {
@@ -140,6 +155,20 @@ void sched_clock_init(void)
 	}
 
 	sched_clock_running = 1;
+
+	/*
+	 * Ensure that it is impossible to not do a static_key update.
+	 *
+	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
+	 * and do the update, or we must see their __sched_clock_stable_early
+	 * and do the update, or both.
+	 */
+	smp_mb(); /* matches {set,clear}_sched_clock_stable() */
+
+	if (__sched_clock_stable_early)
+		__set_sched_clock_stable();
+	else
+		__clear_sched_clock_stable(NULL);
 }
 
 /*
@@ -340,7 +369,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  */
 u64 cpu_clock(int cpu)
 {
-	if (static_key_false(&__sched_clock_stable))
+	if (!sched_clock_stable())
 		return sched_clock_cpu(cpu);
 
 	return sched_clock();
@@ -355,7 +384,7 @@ u64 cpu_clock(int cpu)
  */
 u64 local_clock(void)
 {
-	if (static_key_false(&__sched_clock_stable))
+	if (!sched_clock_stable())
 		return sched_clock_cpu(raw_smp_processor_id());
 
 	return sched_clock();