Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--  kernel/time/tick-sched.c  42
1 file changed, 18 insertions(+), 24 deletions(-)
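Much of this diff follows one pattern: code that used to fetch its own CPU number with smp_processor_id() and then index the per-CPU area with per_cpu(tick_cpu_sched, cpu) is switched to local per-CPU accessors (__get_cpu_var(), __this_cpu_write()), and helpers now take a struct tick_sched * instead of a CPU number. A minimal sketch of the two styles, using a hypothetical demo_count per-CPU variable that is not part of this patch:

	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* Hypothetical per-CPU counter, for illustration only. */
	static DEFINE_PER_CPU(unsigned long, demo_count);

	static void demo_old_style(void)
	{
		/* Old pattern: fetch this CPU's id, then index the per-CPU area. */
		int cpu = smp_processor_id();
		per_cpu(demo_count, cpu)++;
	}

	static void demo_new_style(void)
	{
		/* New pattern: operate on the local copy directly. */
		__this_cpu_inc(demo_count);
	}

The local form avoids the explicit smp_processor_id() lookup on the fast path and lets architectures implement the access as a single instruction where possible.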
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ea20f7d1ac2c..08cb0c3b8ccb 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -86,6 +86,7 @@ static void tick_do_update_jiffies64(ktime_t now)
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
write_sequnlock(&jiffies_lock);
+ update_wall_time();
}
/*
@@ -177,7 +178,7 @@ static bool can_stop_full_tick(void)
* TODO: kick full dynticks CPUs when
* sched_clock_stable is set.
*/
- if (!sched_clock_stable) {
+ if (!sched_clock_stable()) {
trace_tick_stop(0, "unstable sched clock\n");
/*
* Don't allow the user to think they can get
@@ -391,11 +392,9 @@ __setup("nohz=", setup_tick_nohz);
*/
static void tick_nohz_update_jiffies(ktime_t now)
{
- int cpu = smp_processor_id();
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long flags;
- ts->idle_waketime = now;
+ __this_cpu_write(tick_cpu_sched.idle_waketime, now);
local_irq_save(flags);
tick_do_update_jiffies64(now);
@@ -426,17 +425,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
}
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
- update_ts_time_stats(cpu, ts, now, NULL);
+ update_ts_time_stats(smp_processor_id(), ts, now, NULL);
ts->idle_active = 0;
sched_clock_idle_wakeup_event(0);
}
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
ktime_t now = ktime_get();
@@ -754,7 +751,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
ktime_t now, expires;
int cpu = smp_processor_id();
- now = tick_nohz_start_idle(cpu, ts);
+ now = tick_nohz_start_idle(ts);
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
@@ -911,8 +908,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
*/
void tick_nohz_idle_exit(void)
{
- int cpu = smp_processor_id();
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
ktime_t now;
local_irq_disable();
@@ -925,7 +921,7 @@ void tick_nohz_idle_exit(void)
now = ktime_get();
if (ts->idle_active)
- tick_nohz_stop_idle(cpu, now);
+ tick_nohz_stop_idle(ts, now);
if (ts->tick_stopped) {
tick_nohz_restart_sched_tick(ts, now);
@@ -1009,12 +1005,10 @@ static void tick_nohz_switch_to_nohz(void)
* timer and do not touch the other magic bits which need to be done
* when idle is left.
*/
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
{
#if 0
/* Switch back to 2.6.27 behaviour */
-
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t delta;
/*
@@ -1029,36 +1023,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
#endif
}
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
{
- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
ktime_t now;
if (!ts->idle_active && !ts->tick_stopped)
return;
now = ktime_get();
if (ts->idle_active)
- tick_nohz_stop_idle(cpu, now);
+ tick_nohz_stop_idle(ts, now);
if (ts->tick_stopped) {
tick_nohz_update_jiffies(now);
- tick_nohz_kick_tick(cpu, now);
+ tick_nohz_kick_tick(ts, now);
}
}
#else
static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
* Called from irq_enter to notify about the possible interruption of idle()
*/
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
{
- tick_check_oneshot_broadcast(cpu);
- tick_check_nohz(cpu);
+ tick_check_oneshot_broadcast_this_cpu();
+ tick_check_nohz_this_cpu();
}
/*
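
Dropping the cpu argument from tick_check_idle() implies a matching change at its only caller, irq_enter() in kernel/softirq.c, which has to be updated in step (not shown in this diff). A rough sketch of what that call site looks like after the change, with the surrounding details abbreviated and not taken verbatim from the kernel source:

	/* kernel/softirq.c, simplified sketch */
	void irq_enter(void)
	{
		rcu_irq_enter();
		if (is_idle_task(current) && !in_interrupt()) {
			/* Was: tick_check_idle(cpu); no cpu argument is needed now,
			 * the callee uses local per-CPU accessors internally.
			 */
			local_bh_disable();
			tick_check_idle();
			_local_bh_enable();
		}
		__irq_enter();
	}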