| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 22:39:00 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 22:39:00 +0200 |
| commit | af79ad2b1f337a00aa150b993635b10bc68dc842 (patch) | |
| tree | 06abe1d9735b27a449443d7d29a9801f690080be /kernel/sched/sched.h | |
| parent | Merge branch 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kerne... (diff) | |
| parent | sched/irqtime: Consolidate irqtime flushing code (diff) | |
| download | linux-af79ad2b1f337a00aa150b993635b10bc68dc842.tar.xz, linux-af79ad2b1f337a00aa150b993635b10bc68dc842.zip | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"The main changes are:
- irqtime accounting cleanups and enhancements. (Frederic Weisbecker)
- schedstat debugging enhancements, making it more broadly available
at runtime. (Josh Poimboeuf)
- More work on asymmetric topology/capacity scheduling. (Morten
Rasmussen)
- sched/wait fixes and cleanups. (Oleg Nesterov)
- PELT (per entity load tracking) improvements. (Peter Zijlstra)
- Rewrite and enhance select_idle_siblings(). (Peter Zijlstra)
- sched/numa enhancements/fixes. (Rik van Riel)
- sched/cputime scalability improvements. (Stanislaw Gruszka)
- Load calculation arithmetic fixes. (Dietmar Eggemann)
- sched/deadline enhancements. (Tommaso Cucinotta)
- Fix utilization accounting when switching to the SCHED_NORMAL
policy. (Vincent Guittot)
- ... plus misc cleanups and enhancements"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (64 commits)
sched/irqtime: Consolidate irqtime flushing code
sched/irqtime: Consolidate accounting synchronization with u64_stats API
u64_stats: Introduce IRQs disabled helpers
sched/irqtime: Remove needless IRQs disablement on kcpustat update
sched/irqtime: No need for preempt-safe accessors
sched/fair: Fix min_vruntime tracking
sched/debug: Add SCHED_WARN_ON()
sched/core: Fix set_user_nice()
sched/fair: Introduce set_curr_task() helper
sched/core, ia64: Rename set_curr_task()
sched/core: Fix incorrect utilization accounting when switching to fair class
sched/core: Optimize SCHED_SMT
sched/core: Rewrite and improve select_idle_siblings()
sched/core: Replace sd_busy/nr_busy_cpus with sched_domain_shared
sched/core: Introduce 'struct sched_domain_shared'
sched/core: Restructure destroy_sched_domain()
sched/core: Remove unused @cpu argument from destroy_sched_domain*()
sched/wait: Introduce init_wait_entry()
sched/wait: Avoid abort_exclusive_wait() in __wait_on_bit_lock()
sched/wait: Avoid abort_exclusive_wait() in ___wait_event()
...
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- | kernel/sched/sched.h | 92 |
1 file changed, 47 insertions, 45 deletions
```diff
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b7fc1ced4380..58df5590d028 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
 #include <linux/binfmts.h>
 #include <linux/mutex.h>
@@ -15,6 +16,12 @@
 #include "cpudeadline.h"
 #include "cpuacct.h"
 
+#ifdef CONFIG_SCHED_DEBUG
+#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
+#else
+#define SCHED_WARN_ON(x)	((void)(x))
+#endif
+
 struct rq;
 struct cpuidle_state;
 
@@ -565,6 +572,8 @@ struct root_domain {
 	 */
 	cpumask_var_t rto_mask;
 	struct cpupri cpupri;
+
+	unsigned long max_cpu_capacity;
 };
 
 extern struct root_domain def_root_domain;
@@ -597,7 +606,6 @@ struct rq {
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
 #endif /* CONFIG_SMP */
-	u64 nohz_stamp;
 	unsigned long nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 #ifdef CONFIG_NO_HZ_FULL
@@ -723,6 +731,23 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+
+#ifdef CONFIG_SCHED_SMT
+
+extern struct static_key_false sched_smt_present;
+
+extern void __update_idle_core(struct rq *rq);
+
+static inline void update_idle_core(struct rq *rq)
+{
+	if (static_branch_unlikely(&sched_smt_present))
+		__update_idle_core(rq);
+}
+
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
@@ -857,8 +882,8 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_busy);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 
 struct sched_group_capacity {
@@ -870,10 +895,6 @@ struct sched_group_capacity {
 	unsigned int capacity;
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
 
 	unsigned long cpumask[0]; /* iteration mask */
 };
@@ -1260,6 +1281,11 @@ static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 	prev->sched_class->put_prev_task(rq, prev);
 }
 
+static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+{
+	curr->sched_class->set_curr_task(rq);
+}
+
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
 	for (class = sched_class_highest; class; class = class->next)
@@ -1290,7 +1316,7 @@ static inline void idle_set_state(struct rq *rq,
 
 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 {
-	WARN_ON(!rcu_read_lock_held());
+	SCHED_WARN_ON(!rcu_read_lock_held());
 	return rq->idle_state;
 }
 #else
@@ -1710,52 +1736,28 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+struct irqtime {
+	u64			hardirq_time;
+	u64			softirq_time;
+	u64			irq_start_time;
+	struct u64_stats_sync	sync;
+};
 
-DECLARE_PER_CPU(u64, cpu_hardirq_time);
-DECLARE_PER_CPU(u64, cpu_softirq_time);
-
-#ifndef CONFIG_64BIT
-DECLARE_PER_CPU(seqcount_t, irq_time_seq);
-
-static inline void irq_time_write_begin(void)
-{
-	__this_cpu_inc(irq_time_seq.sequence);
-	smp_wmb();
-}
-
-static inline void irq_time_write_end(void)
-{
-	smp_wmb();
-	__this_cpu_inc(irq_time_seq.sequence);
-}
+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
 
 static inline u64 irq_time_read(int cpu)
 {
-	u64 irq_time;
-	unsigned seq;
+	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
+	unsigned int seq;
+	u64 total;
 
 	do {
-		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
-		irq_time = per_cpu(cpu_softirq_time, cpu) +
-			   per_cpu(cpu_hardirq_time, cpu);
-	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
-
-	return irq_time;
-}
-#else /* CONFIG_64BIT */
-static inline void irq_time_write_begin(void)
-{
-}
+		seq = __u64_stats_fetch_begin(&irqtime->sync);
+		total = irqtime->softirq_time + irqtime->hardirq_time;
+	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
 
-static inline void irq_time_write_end(void)
-{
-}
-
-static inline u64 irq_time_read(int cpu)
-{
-	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+	return total;
 }
-#endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_CPU_FREQ
```
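One detail worth noting in the hunks above: the non-debug fallback of SCHED_WARN_ON() expands to ((void)(x)) rather than to nothing, so the condition expression is still evaluated and any side effects in the argument are preserved on !CONFIG_SCHED_DEBUG builds; only the warning itself is compiled out.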
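The new update_idle_core() wrapper is a static-branch fast path: sched_smt_present starts out false, so on machines without SMT the call to __update_idle_core() sits behind a patched-out jump and costs essentially nothing. As a minimal sketch of how such a key is defined and flipped (in this series the real toggle lives in the CPU hotplug path in kernel/sched/core.c; the hook name below is hypothetical):

```c
#include <linux/jump_label.h>
#include <linux/topology.h>
#include <linux/cpumask.h>

/* The key starts false: update_idle_core() compiles to a skipped branch. */
DEFINE_STATIC_KEY_FALSE(sched_smt_present);

/*
 * Hypothetical bring-up hook: once a CPU with more than one hardware
 * thread comes online, patch the branch in update_idle_core() to taken,
 * enabling the sibling scan for every future idle transition.
 */
static void note_smt_capable_cpu(int cpu)
{
	if (cpumask_weight(topology_sibling_cpumask(cpu)) > 1)
		static_branch_enable(&sched_smt_present);
}
```

The payoff of static_branch_unlikely() over a plain flag test is that the check is a single patched nop/jump in the instruction stream rather than a load and compare on every entry to the idle path.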
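On the irqtime side, irq_time_read() above is the lockless reader half of a u64_stats_sync pair: on 32-bit kernels the embedded seqcount lets the reader retry instead of observing torn 64-bit counters, while on 64-bit the fetch helpers compile away entirely. A minimal sketch of the matching writer side (the real update path lives in kernel/sched/cputime.c; the function below and its delta bookkeeping are illustrative, only the struct layout comes from this diff):

```c
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Same layout as the 'struct irqtime' introduced in this diff. */
struct irqtime {
	u64			hardirq_time;
	u64			softirq_time;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

static DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Illustrative writer: fold 'delta' nanoseconds of interrupt time into
 * this CPU's counters. u64_stats_update_begin()/_end() bump the seqcount
 * on 32-bit kernels so a concurrent irq_time_read() retries rather than
 * seeing a half-updated value; on 64-bit they are no-ops.
 */
static void irqtime_account_delta(u64 delta, bool hardirq)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);

	u64_stats_update_begin(&irqtime->sync);
	if (hardirq)
		irqtime->hardirq_time += delta;
	else
		irqtime->softirq_time += delta;
	u64_stats_update_end(&irqtime->sync);
}
```

This is also why the series can delete the open-coded irq_time_write_begin()/irq_time_write_end() helpers and the CONFIG_64BIT special case: the u64_stats API already provides the 32-bit-only seqcount and collapses to plain loads and stores on 64-bit.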