author    Valentin Schneider <valentin.schneider@arm.com>  2020-03-30 11:01:27 +0200
committer Ingo Molnar <mingo@kernel.org>                   2020-04-08 11:35:18 +0200
commit    d76343c6b2b79f5e89c392bc9ce9dabc4c9e90cb (patch)
tree      d6aee9435fac43376d5da2f24e9c5e5dde89caff /kernel/sched
parent    Merge tag 'drm-next-2020-04-08' of git://anongit.freedesktop.org/drm/drm (diff)
sched/fair: Align rq->avg_idle and rq->avg_scan_cost
sched/core.c uses update_avg() for rq->avg_idle and sched/fair.c uses
an open-coded version (with the exact same decay factor) for
rq->avg_scan_cost. On top of that, select_idle_cpu() expects to be able
to compare these two fields.

The only difference between the two is that rq->avg_scan_cost is
computed using a pure division rather than a shift. Turns out it
actually matters, first of all because the shifted value can be
negative, and the standard has this to say about it:

"""
The result of E1 >> E2 is E1 right-shifted E2 bit positions. [...]
If E1 has a signed type and a negative value, the resulting value is
implementation-defined.
"""

Not only this, but (arithmetic) right shifting a negative value (using
2's complement) is *not* equivalent to dividing it by the corresponding
power of 2. Let's look at a few examples:

  -4      -> 0xF..FC
  -4 >> 3 -> 0xF..FF == -1 != -4 / 8

  -8      -> 0xF..F8
  -8 >> 3 -> 0xF..FF == -1 == -8 / 8

  -9      -> 0xF..F7
  -9 >> 3 -> 0xF..FE == -2 != -9 / 8

Make update_avg() use a division, and export it to the private scheduler
header to reuse it where relevant. Note that this still lets compilers
use a shift here, but should prevent any unwanted surprise.

The disassembly of select_idle_cpu() remains unchanged on arm64, and
ttwu_do_wakeup() gains 2 instructions; the diff sort of looks like this:

  -       sub     x1, x1, x0
  +       subs    x1, x1, x0        // set condition codes
  +       add     x0, x1, #0x7
  +       csel    x0, x0, x1, mi    // x0 = x1 < 0 ? x0 : x1
          add     x0, x3, x0, asr #3

which does the right thing (i.e. gives us the expected result while
still using an arithmetic shift).

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200330090127.16294-1-valentin.schneider@arm.com
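[Editor's note] For reference, a minimal stand-alone C program (not part of
the commit; written for this page) that reproduces the three examples above
on a common 2's-complement toolchain, where the shift of a negative value
compiles to an arithmetic shift:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* The three sample values from the commit message. */
	int64_t vals[] = { -4, -8, -9 };

	for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		int64_t v = vals[i];

		/*
		 * v >> 3 is implementation-defined for negative v; on
		 * 2's-complement targets it is an arithmetic shift and
		 * rounds toward negative infinity. v / 8 truncates
		 * toward zero, so the two disagree for -4 and -9 but
		 * agree for -8 (exactly divisible).
		 */
		printf("%" PRId64 " >> 3 = %" PRId64 ", %" PRId64 " / 8 = %" PRId64 "\n",
		       v, v >> 3, v, v / 8);
	}
	return 0;
}

This also explains the two extra arm64 instructions above: adding 7 before
the arithmetic shift (via csel, only when the value is negative) is the
standard lowering of a truncating signed division by 8.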
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c  | 6 ------
-rw-r--r--  kernel/sched/fair.c  | 7 ++-----
-rw-r--r--  kernel/sched/sched.h | 6 ++++++
3 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a2694ba82874..f6b329bca0c6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2119,12 +2119,6 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	return cpu;
 }
 
-static void update_avg(u64 *avg, u64 sample)
-{
-	s64 diff = sample - *avg;
-	*avg += diff >> 3;
-}
-
 void sched_set_stop_task(int cpu, struct task_struct *stop)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ea3dddafe69..fb025e946f83 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6080,8 +6080,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	struct sched_domain *this_sd;
 	u64 avg_cost, avg_idle;
-	u64 time, cost;
-	s64 delta;
+	u64 time;
 	int this = smp_processor_id();
 	int cpu, nr = INT_MAX;
 
@@ -6119,9 +6118,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	}
 
 	time = cpu_clock(this) - time;
-	cost = this_sd->avg_scan_cost;
-	delta = (s64)(time - cost) / 8;
-	this_sd->avg_scan_cost += delta;
+	update_avg(&this_sd->avg_scan_cost, time);
 
 	return cpu;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0f616bf7bce3..cd008147eccb 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -195,6 +195,12 @@ static inline int task_has_dl_policy(struct task_struct *p)
 #define cap_scale(v, s)	((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
+static inline void update_avg(u64 *avg, u64 sample)
+{
+	s64 diff = sample - *avg;
+	*avg += diff / 8;
+}
+
 /*
  * !! For sched_setattr_nocheck() (kernel) only !!
  *
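[Editor's note] For illustration, a user-space sketch of how the now-shared
helper behaves. The update_avg() body mirrors the hunk above; the typedefs
and the main() driver are inventions for this example:

#include <stdio.h>
#include <inttypes.h>

typedef uint64_t u64;
typedef int64_t s64;

/*
 * Same body as the helper added to kernel/sched/sched.h: an
 * exponentially weighted moving average with a 1/8 decay factor.
 */
static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;	/* goes negative when sample < *avg */
	*avg += diff / 8;		/* division: well-defined for negative diff */
}

int main(void)
{
	u64 avg = 1000;			/* e.g. an initial scan cost in ns */
	u64 samples[] = { 500, 500, 2000, 100 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_avg(&avg, samples[i]);
		printf("sample=%-4" PRIu64 " avg=%" PRIu64 "\n",
		       samples[i], avg);
	}
	return 0;
}

Each new sample moves the average one eighth of the way toward it, so with
this change rq->avg_idle and sd->avg_scan_cost decay identically and remain
directly comparable in select_idle_cpu().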