author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-05-22 14:04:28 +0200
committer	Ingo Molnar <mingo@kernel.org>	2012-05-30 14:02:24 +0200
commit	b654f7de41b0e3903ee2b51d3b8db77fe52ce728 (patch)
tree	cdcd46a04d8b59d75045f1d3ae753e4843dbe2c0
parent	sched: Fix SD_OVERLAP (diff)
sched: Make sure to not re-read variables after validation
We could re-read rq->rt_avg after we validated it was smaller than total, invalidating the check and resulting in an unintended negative.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: David Rientjes <rientjes@google.com>
Link: http://lkml.kernel.org/r/1337688268.9698.29.camel@twins
Signed-off-by: Ingo Molnar <mingo@kernel.org>
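The race is easiest to see in isolation. Below is a minimal standalone C sketch of the same pattern, not kernel code: the file-scope rt_avg, the helper names and the simplified ACCESS_ONCE() stand-in are made up for the example. It contrasts a function that may re-read the shared value after the bounds check with one that snapshots it once, the way the patch does.

/*
 * Standalone sketch of the bug class this patch fixes: a shared
 * variable read around a bounds check can change (or be reloaded by
 * the compiler) between the check and the use, so the subtraction can
 * underflow even though the check passed.
 */
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's ACCESS_ONCE(): force one load */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static uint64_t rt_avg;		/* updated concurrently in the real code */

/* Racy: rt_avg may be re-read for the subtraction after the check. */
static uint64_t available_racy(uint64_t total)
{
	if (total < rt_avg)		/* first read */
		return 0;
	return total - rt_avg;		/* possible second read */
}

/* Fixed: snapshot rt_avg once, then check and use the snapshot. */
static uint64_t available_fixed(uint64_t total)
{
	uint64_t avg = ACCESS_ONCE(rt_avg);

	if (total < avg)
		return 0;
	return total - avg;
}

int main(void)
{
	rt_avg = 100;
	printf("racy:  %llu\n", (unsigned long long)available_racy(250));
	printf("fixed: %llu\n", (unsigned long long)available_fixed(250));
	return 0;
}

The single ACCESS_ONCE() load only guarantees that the value checked is the value used; it adds no ordering against concurrent writers, which matches the changelog's note that these variables are read without serialization.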
-rw-r--r--	kernel/sched/fair.c	15
1 file changed, 11 insertions, 4 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f0380d4987b3..2b449a762074 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 unsigned long scale_rt_power(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	u64 total, available;
+	u64 total, available, age_stamp, avg;
 
-	total = sched_avg_period() + (rq->clock - rq->age_stamp);
+	/*
+	 * Since we're reading these variables without serialization make sure
+	 * we read them once before doing sanity checks on them.
+	 */
+	age_stamp = ACCESS_ONCE(rq->age_stamp);
+	avg = ACCESS_ONCE(rq->rt_avg);
+
+	total = sched_avg_period() + (rq->clock - age_stamp);
 
-	if (unlikely(total < rq->rt_avg)) {
+	if (unlikely(total < avg)) {
 		/* Ensures that power won't end up being negative */
 		available = 0;
 	} else {
-		available = total - rq->rt_avg;
+		available = total - avg;
 	}
 
 	if (unlikely((s64)total < SCHED_POWER_SCALE))