commit dfbca41f347997e57048a53755611c8e2d792924
Author:     Peter Zijlstra <peterz@infradead.org>   2015-03-23 14:19:05 +0100
Committer:  Ingo Molnar <mingo@kernel.org>          2015-03-27 09:36:08 +0100
Parent:     sched: Move CFS tasks to CPUs with higher capacity
sched: Optimize freq invariant accounting
Currently the freq invariant accounting (in
__update_entity_runnable_avg() and sched_rt_avg_update()) gets the
scale factor from a weak function call; this means that even for archs
that use the default implementation, the compiler cannot see into the
function and optimize the extra scaling math away.

This is sad, especially since it's a 64-bit multiplication, which can
be quite costly on some platforms.
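To make the cost concrete, here is an illustrative sketch (not code
from this commit) of the scaling pattern used in
__update_entity_runnable_avg(); the helper name is made up, the
constants are the kernel's real ones:

    #define SCHED_CAPACITY_SHIFT	10
    #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

    /* Illustrative stand-in for the per-delta freq scaling. */
    static inline unsigned long scale_freq_delta(unsigned long delta,
                                                 unsigned long scale_freq)
    {
            /*
             * When scale_freq comes back from an opaque (weak) call,
             * the multiply below must be emitted. When the compiler
             * can instead see scale_freq == SCHED_CAPACITY_SCALE,
             * (delta * 1024) >> 10 folds to plain 'delta' and the
             * multiply disappears entirely.
             */
            return (delta * scale_freq) >> SCHED_CAPACITY_SHIFT;
    }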
So replace the weak function with #ifdef and __always_inline goo. This
is not quite as nice from an arch support PoV, but should at least
result in compile-time errors if done wrong.
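For illustration, an architecture would now override the default by
providing its own inline and defining the macro before the #ifndef
test in kernel/sched/sched.h is reached; everything named 'foo' below
is hypothetical, not code from this commit:

    /* arch/foo/include/asm/topology.h -- hypothetical override */
    DECLARE_PER_CPU(unsigned long, foo_freq_scale); /* curr/max ratio << SCHED_CAPACITY_SHIFT */

    static __always_inline
    unsigned long foo_scale_freq_capacity(struct sched_domain *sd, int cpu)
    {
            return per_cpu(foo_freq_scale, cpu);
    }
    #define arch_scale_freq_capacity foo_scale_freq_capacity

A mismatched signature here now fails the build outright at the call
sites, instead of silently coexisting with the weak default.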
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Morten.Rasmussen@arm.com
Cc: Paul Turner <pjt@google.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: dietmar.eggemann@arm.com
Cc: efault@gmx.de
Cc: kamalesh@linux.vnet.ibm.com
Cc: nicolas.pitre@linaro.org
Cc: preeti@linux.vnet.ibm.com
Cc: riel@redhat.com
Link: http://lkml.kernel.org/r/20150323131905.GF23123@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
 kernel/sched/fair.c  | 12 ------------
 kernel/sched/sched.h |  9 ++++++++-
 2 files changed, 8 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0576ce0e0af2..3a798ec36824 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2484,8 +2484,6 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
-
 /*
  * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series. To do this we sub-divide our runnable
@@ -6010,16 +6008,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_capacity(sd, cpu);
-}
-
 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dd532c558ad4..91c6736d2522 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1387,7 +1387,14 @@ static inline int hrtick_enabled(struct rq *rq)
 #ifdef CONFIG_SMP
 
 extern void sched_avg_update(struct rq *rq);
-extern unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
 
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
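For context, this is roughly how one of the call sites
(sched_rt_avg_update() in kernel/sched/sched.h, shown here slightly
abridged, as it looked around this commit) benefits from the change:

    static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
    {
            /*
             * With the default now visible as an inline returning the
             * constant SCHED_CAPACITY_SCALE, the compiler can turn
             * this 64-bit multiply into a cheap shift; the weak
             * version forced a call plus a full multiply.
             */
            rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
            sched_avg_update(rq);
    }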