author     Ingo Molnar <mingo@kernel.org>    2013-12-04 10:09:53 +0100
committer  Ingo Molnar <mingo@kernel.org>    2013-12-04 10:09:53 +0100
commit     a934a56e1284f1863c9c800ff9c63183c25aec93 (patch)
tree       643ffbf749b8b9d68e1b8641c6d179f535b9f9c0 /kernel
parent     Merge branch 'leds-fixes-for-3.13' of git://git.kernel.org/pub/scm/linux/kern... (diff)
parent     posix-timers: Fix full dynticks CPUs kick on timer rescheduling (diff)
Merge branch 'timers/core-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core
Pull dynticks updates from Frederic Weisbecker:
* Fix a bug where posix cpu timers requeued due to their interval were ignored on
  full dynticks CPUs (not a regression, though, as it only affects full dynticks and
  the bug has been there since full dynticks was merged).
* Optimizations and cleanups in the use of per-CPU APIs, improving code readability,
  performance and debuggability in the nohz subsystem.
* Optimize posix cpu timers by sparing the stub workqueue kick when full dynticks is off.
* Rename some functions with a *_this_cpu() suffix for clarity.
* Refine the naming of some context tracking subsystem state accessors (a minimal
  sketch of this accessor pattern follows the sign-off below).
* Trivial spelling fix by Paul Gortmaker.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
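
The accessor rename mentioned above follows a common kernel pattern: hide the raw
static-key test behind a helper whose name reads as a question at the call site.
Below is a minimal userspace sketch of that pattern, assuming hypothetical names
(ctx_enabled, ctx_is_enabled(), ctx_user_enter()) rather than the kernel's real
symbols:

#include <stdbool.h>

static bool ctx_enabled;                 /* stand-in for the real static key */

static inline bool ctx_is_enabled(void)
{
        /* kernel equivalent: static_key_false(&context_tracking_enabled) */
        return ctx_enabled;
}

void ctx_user_enter(void)
{
        if (!ctx_is_enabled())           /* early-out on the common fast path */
                return;
        /* ... slow path: record the kernel->user transition ... */
}

The point of the wrapper is that call sites no longer need to know that a static
key backs the check, which is exactly what the context_tracking.c hunks below do.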
Diffstat (limited to 'kernel')
 kernel/context_tracking.c    |  8
 kernel/posix-cpu-timers.c    | 21
 kernel/softirq.c             |  4
 kernel/time/tick-broadcast.c |  6
 kernel/time/tick-internal.h  |  4
 kernel/time/tick-sched.c     | 39
 6 files changed, 35 insertions(+), 47 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e5f3917aa05b..6cb20d2e7ee0 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
        /*
         * Repeat the user_enter() check here because some archs may be calling
         * this from asm and if no CPU needs context tracking, they shouldn't
-        * go further. Repeat the check here until they support the static key
-        * check.
+        * go further. Repeat the check here until they support the inline static
+        * key check.
         */
-       if (!static_key_false(&context_tracking_enabled))
+       if (!context_tracking_is_enabled())
                return;
 
        /*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
 {
        unsigned long flags;
 
-       if (!static_key_false(&context_tracking_enabled))
+       if (!context_tracking_is_enabled())
                return;
 
        if (in_interrupt())
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c7f31aa272f7..79747b7d9420 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -608,7 +608,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
  */
 static void posix_cpu_timer_kick_nohz(void)
 {
-       schedule_work(&nohz_kick_work);
+       if (context_tracking_is_enabled())
+               schedule_work(&nohz_kick_work);
 }
 
 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -1090,7 +1091,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                put_task_struct(p);
                timer->it.cpu.task = p = NULL;
                timer->it.cpu.expires = 0;
-               goto out_unlock;
+               read_unlock(&tasklist_lock);
+               goto out;
        } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                /*
                 * We've noticed that the thread is dead, but
@@ -1099,7 +1101,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                 */
                cpu_timer_sample_group(timer->it_clock, p, &now);
                clear_dead_task(timer, now);
-               goto out_unlock;
+               read_unlock(&tasklist_lock);
+               goto out;
        }
        spin_lock(&p->sighand->siglock);
        cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -1113,10 +1116,11 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
        BUG_ON(!irqs_disabled());
        arm_timer(timer);
        spin_unlock(&p->sighand->siglock);
-
-out_unlock:
        read_unlock(&tasklist_lock);
 
+       /* Kick full dynticks CPUs in case they need to tick on the new timer */
+       posix_cpu_timer_kick_nohz();
+
 out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
@@ -1256,13 +1260,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
                cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
-
-       /*
-        * In case some timers were rescheduled after the queue got emptied,
-        * wake up full dynticks CPUs.
-        */
-       if (tsk->signal->cputimer.running)
-               posix_cpu_timer_kick_nohz();
 }
 
 /*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025ccc06dd..11348de09400 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -311,8 +311,6 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-       int cpu = smp_processor_id();
-
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
@@ -320,7 +318,7 @@ void irq_enter(void)
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
-               tick_check_idle(cpu);
+               tick_check_idle();
                _local_bh_enable();
        }
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690daaa9..43780ab5e279 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Called from irq_enter() when idle was interrupted to reenable the
  * per cpu device.
  */
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
 {
-       if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
-               struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+       if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+               struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 
                /*
                 * We might be in the middle of switching over from
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7fbc2a..e2bced59b6dd 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f834..2afd43fca93b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -391,11 +391,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-       int cpu = smp_processor_id();
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long flags;
 
-       ts->idle_waketime = now;
+       __this_cpu_write(tick_cpu_sched.idle_waketime, now);
 
        local_irq_save(flags);
        tick_do_update_jiffies64(now);
@@ -426,17 +424,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
-       update_ts_time_stats(cpu, ts, now, NULL);
+       update_ts_time_stats(smp_processor_id(), ts, now, NULL);
        ts->idle_active = 0;
 
        sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
        ktime_t now = ktime_get();
 
@@ -752,7 +748,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
        ktime_t now, expires;
        int cpu = smp_processor_id();
 
-       now = tick_nohz_start_idle(cpu, ts);
+       now = tick_nohz_start_idle(ts);
 
        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;
@@ -914,8 +910,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-       int cpu = smp_processor_id();
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+       struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now;
 
        local_irq_disable();
@@ -928,7 +923,7 @@ void tick_nohz_idle_exit(void)
        now = ktime_get();
 
        if (ts->idle_active)
-               tick_nohz_stop_idle(cpu, now);
+               tick_nohz_stop_idle(ts, now);
 
        if (ts->tick_stopped) {
                tick_nohz_restart_sched_tick(ts, now);
@@ -1012,12 +1007,10 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
        /* Switch back to 2.6.27 behaviour */
-
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t delta;
 
        /*
@@ -1032,36 +1025,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
 {
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+       struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now;
 
        if (!ts->idle_active && !ts->tick_stopped)
                return;
 
        now = ktime_get();
 
        if (ts->idle_active)
-               tick_nohz_stop_idle(cpu, now);
+               tick_nohz_stop_idle(ts, now);
 
        if (ts->tick_stopped) {
                tick_nohz_update_jiffies(now);
-               tick_nohz_kick_tick(cpu, now);
+               tick_nohz_kick_tick(ts, now);
        }
 }
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
 {
-       tick_check_oneshot_broadcast(cpu);
-       tick_check_nohz(cpu);
+       tick_check_oneshot_broadcast_this_cpu();
+       tick_check_nohz_this_cpu();
 }
 
 /*
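
As a footnote on the *_this_cpu() conversions above: the pattern drops a redundant
"int cpu" argument (callers always passed their own CPU anyway) in favor of a
parameterless helper that resolves the calling CPU's state internally. Here is a
self-contained userspace sketch, with hypothetical stand-ins (per_cpu_sched,
this_cpu_id()) for the kernel's per-CPU machinery, not real kernel API:

#include <stdio.h>

#define NR_CPUS 4

struct tick_state { int tick_stopped; };

static struct tick_state per_cpu_sched[NR_CPUS]; /* stand-in for per-CPU data */

static int this_cpu_id(void)                     /* stand-in for smp_processor_id() */
{
        return 0;                                /* pretend we run on CPU 0 */
}

/* Before: the caller passes a cpu it already knows is its own. */
static void tick_check_old(int cpu)
{
        printf("cpu%d stopped=%d\n", cpu, per_cpu_sched[cpu].tick_stopped);
}

/* After: the _this_cpu() variant takes no argument, so it cannot be
 * misapplied to a remote CPU's state. */
static void tick_check_this_cpu(void)
{
        int cpu = this_cpu_id();
        printf("cpu%d stopped=%d\n", cpu, per_cpu_sched[cpu].tick_stopped);
}

int main(void)
{
        tick_check_old(this_cpu_id());           /* old calling convention */
        tick_check_this_cpu();                   /* new argument-free form */
        return 0;
}

Besides readability, the argument-free form lets implementations use the cheaper
current-CPU accessors (__get_cpu_var(), __this_cpu_write()) instead of indexing
per-CPU storage by an arbitrary cpu number, which is what the tick-sched.c and
tick-broadcast.c hunks do.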