Diffstat (limited to 'kernel/hrtimer.c')
 kernel/hrtimer.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 44 insertions(+), 13 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 476cb0c0b4a4..1b3033105b40 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -59,6 +59,7 @@ ktime_t ktime_get(void)
return timespec_to_ktime(now);
}
+EXPORT_SYMBOL_GPL(ktime_get);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
@@ -135,7 +136,7 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
ktime_t xtim, tomono;
- struct timespec xts;
+ struct timespec xts, tom;
unsigned long seq;
do {
@@ -145,10 +146,11 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
#else
xts = xtime;
#endif
+ tom = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
xtim = timespec_to_ktime(xts);
- tomono = timespec_to_ktime(wall_to_monotonic);
+ tomono = timespec_to_ktime(tom);
base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
base->clock_base[CLOCK_MONOTONIC].softirq_time =
ktime_add(xtim, tomono);
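
The point of this hunk is that xtime and wall_to_monotonic are now sampled inside the same xtime_lock seqlock loop, so the pair stays consistent even if the wall clock is updated concurrently. A minimal user-space sketch of that retry pattern follows; the names (seq, wall_ns, mono_off_ns, write_pair, read_pair) are illustrative stand-ins, not kernel symbols, and a single writer is assumed:

#include <stdatomic.h>
#include <stdint.h>

/*
 * Simplified seqlock-style snapshot: an even sequence means the data is
 * stable, odd means a write is in progress. Real seqlocks also involve
 * memory barriers and a writer-side lock that are omitted here.
 */
static atomic_uint seq;
static _Atomic int64_t wall_ns, mono_off_ns;

/* Single writer: publish wall time and monotonic offset as one unit. */
static void write_pair(int64_t wall, int64_t off)
{
        atomic_fetch_add(&seq, 1);              /* odd: readers will retry */
        atomic_store(&wall_ns, wall);
        atomic_store(&mono_off_ns, off);
        atomic_fetch_add(&seq, 1);              /* even: snapshot is consistent */
}

/* Reader: retry until both values were read under the same even sequence. */
static void read_pair(int64_t *wall, int64_t *off)
{
        unsigned start;

        do {
                start = atomic_load(&seq);
                *wall = atomic_load(&wall_ns);
                *off  = atomic_load(&mono_off_ns);
        } while ((start & 1) || atomic_load(&seq) != start);
}

int main(void)
{
        int64_t w, o;

        write_pair(1000, -250);
        read_pair(&w, &o);
        return (w + o) == 750 ? 0 : 1;
}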
@@ -277,6 +279,8 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
return ktime_add(kt, tmp);
}
+
+EXPORT_SYMBOL_GPL(ktime_add_ns);
# endif /* !CONFIG_KTIME_SCALAR */
/*
@@ -458,6 +462,18 @@ void clock_was_set(void)
}
/*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt (on the local CPU):
+ */
+void hres_timers_resume(void)
+{
+ WARN_ON_ONCE(num_online_cpus() > 1);
+
+ /* Retrigger the CPU local events: */
+ retrigger_next_event(NULL);
+}
+
+/*
* Check, whether the timer is on the callback pending list
*/
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
@@ -540,19 +556,19 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
/*
* Switch to high resolution mode
*/
-static void hrtimer_switch_to_hres(void)
+static int hrtimer_switch_to_hres(void)
{
struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
unsigned long flags;
if (base->hres_active)
- return;
+ return 1;
local_irq_save(flags);
if (tick_init_highres()) {
local_irq_restore(flags);
- return;
+ return 0;
}
base->hres_active = 1;
base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
@@ -565,13 +581,14 @@ static void hrtimer_switch_to_hres(void)
local_irq_restore(flags);
printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
smp_processor_id());
+ return 1;
}
#else
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline void hrtimer_switch_to_hres(void) { }
+static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
@@ -643,6 +660,12 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
orun++;
}
timer->expires = ktime_add(timer->expires, interval);
+ /*
+ * Make sure, that the result did not wrap with a very large
+ * interval.
+ */
+ if (timer->expires.tv64 < 0)
+ timer->expires = ktime_set(KTIME_SEC_MAX, 0);
return orun;
}
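
The added wrap check matters when a periodic timer with a very large interval is forwarded: ktime_add() can push the signed 64-bit nanosecond value past its maximum, which shows up as a negative tv64, so the expiry is clamped instead. A standalone sketch of the same clamp; the function name and the pre-addition test are my own (the kernel inspects the sign of the sum after adding), and non-negative inputs are assumed:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000LL
/* Roughly what ktime_set(KTIME_SEC_MAX, 0) evaluates to: the largest
 * whole number of seconds representable as signed 64-bit nanoseconds. */
#define KTIME_SEC_MAX (INT64_MAX / NSEC_PER_SEC)

/* Advance an expiry value, clamping instead of wrapping on overflow. */
static int64_t forward_expires(int64_t expires_ns, int64_t interval_ns)
{
        if (interval_ns > INT64_MAX - expires_ns)
                return KTIME_SEC_MAX * NSEC_PER_SEC;    /* would have wrapped */
        return expires_ns + interval_ns;
}

int main(void)
{
        /* Near the top of the range: clamped rather than wrapped negative. */
        printf("%lld\n", (long long)forward_expires(INT64_MAX - 5, 1000));
        /* Normal case: plain addition. */
        printf("%lld\n", (long long)forward_expires(1000, 500));
        return 0;
}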
@@ -806,7 +829,12 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
timer_stats_hrtimer_set_start_info(timer);
- enqueue_hrtimer(timer, new_base, base == new_base);
+ /*
+ * Only allow reprogramming if the new base is on this CPU.
+ * (it might still be on another CPU if the timer was pending)
+ */
+ enqueue_hrtimer(timer, new_base,
+ new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
unlock_hrtimer_base(timer, &flags);
@@ -1130,6 +1158,9 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
if (base->softirq_time.tv64 <= timer->expires.tv64)
break;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
+#endif
timer_stats_account_hrtimer(timer);
fn = timer->function;
@@ -1173,7 +1204,8 @@ void hrtimer_run_queues(void)
* deadlock vs. xtime_lock.
*/
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
- hrtimer_switch_to_hres();
+ if (hrtimer_switch_to_hres())
+ return;
hrtimer_get_softirq_time(cpu_base);
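
hrtimer_switch_to_hres() now reports whether the switch succeeded, which lets hrtimer_run_queues() stop early: once high resolution mode is active, expiry is driven from the hrtimer interrupt and the low resolution softirq path has nothing left to do. A control-flow sketch of that pattern; every identifier below is a hypothetical stand-in and the failure path of tick_init_highres() is left out:

#include <stdbool.h>
#include <stdio.h>

static bool hres_active;

/* Pretend the switch to high resolution mode always succeeds. */
static bool switch_to_hres(void)
{
        hres_active = true;
        return true;
}

static void run_queues(bool oneshot_change_pending)
{
        if (hres_active)
                return;         /* the hres interrupt owns the timers now */

        if (oneshot_change_pending && switch_to_hres())
                return;         /* nothing left for the low resolution path */

        puts("low resolution path: expire timers from the softirq");
}

int main(void)
{
        run_queues(false);      /* walks the queues */
        run_queues(true);       /* switches and returns early */
        run_queues(false);      /* already in hres mode: returns immediately */
        return 0;
}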
@@ -1355,17 +1387,16 @@ static void migrate_hrtimers(int cpu)
tick_cancel_sched_timer(cpu);
local_irq_disable();
-
- spin_lock(&new_base->lock);
- spin_lock(&old_base->lock);
+ double_spin_lock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ double_spin_unlock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
local_irq_enable();
put_cpu_var(hrtimer_bases);
}
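
Replacing the nested spin_lock() calls with double_spin_lock()/double_spin_unlock() keyed off smp_processor_id() < cpu gives the two per-CPU base locks a fixed acquisition order, which is the standard way to rule out an ABBA deadlock when two paths can need the same pair of locks. A user-space sketch of that ordered-locking idea, using pthread mutexes rather than the kernel helpers (the helper bodies below are an illustration, not the kernel implementation):

#include <pthread.h>

/* Take two locks in an order fixed by the caller-supplied flag. */
static void double_lock(pthread_mutex_t *l1, pthread_mutex_t *l2, int l1_first)
{
        if (l1_first) {
                pthread_mutex_lock(l1);
                pthread_mutex_lock(l2);
        } else {
                pthread_mutex_lock(l2);
                pthread_mutex_lock(l1);
        }
}

/* Release in the reverse order of acquisition. */
static void double_unlock(pthread_mutex_t *l1, pthread_mutex_t *l2, int l1_first)
{
        if (l1_first) {
                pthread_mutex_unlock(l2);
                pthread_mutex_unlock(l1);
        } else {
                pthread_mutex_unlock(l1);
                pthread_mutex_unlock(l2);
        }
}

int main(void)
{
        pthread_mutex_t base_a = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t base_b = PTHREAD_MUTEX_INITIALIZER;

        /* "smp_processor_id() < cpu" plays the role of l1_first above. */
        double_lock(&base_a, &base_b, 1);
        double_unlock(&base_a, &base_b, 1);
        return 0;
}

In the hunk above the flag is smp_processor_id() < cpu, so whichever base belongs to the lower-numbered CPU is locked first no matter which CPU is going offline.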