Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  | 7 ++++---
-rw-r--r--  kernel/sched/sched.h | 2 +-
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 102cdf1e4e97..573dc98c6248 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2553,6 +2553,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	u32 contrib;
 	int delta_w, scaled_delta_w, decayed = 0;
 	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
+	unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
 
 	delta = now - sa->last_update_time;
 
 	/*
@@ -2596,7 +2597,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 			}
 		}
 		if (running)
-			sa->util_sum += scaled_delta_w;
+			sa->util_sum += scale(scaled_delta_w, scale_cpu);
 
 		delta -= delta_w;
@@ -2620,7 +2621,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 				cfs_rq->runnable_load_sum += weight * contrib;
 		}
 		if (running)
-			sa->util_sum += contrib;
+			sa->util_sum += scale(contrib, scale_cpu);
 	}
 
 	/* Remainder of delta accrued against u_0` */
@@ -2631,7 +2632,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 			cfs_rq->runnable_load_sum += weight * scaled_delta;
 	}
 	if (running)
-		sa->util_sum += scaled_delta;
+		sa->util_sum += scale(scaled_delta, scale_cpu);
 
 	sa->period_contrib += delta;
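
The util_sum hunks above multiply each contribution by scale_cpu via the same scale() helper that fair.c already applies for the frequency factor (scale_freq); here it is reused with the CPU capacity as the second argument. A minimal standalone sketch of that arithmetic, assuming the usual fixed-point convention where SCHED_CAPACITY_SCALE is 1024 (1 << SCHED_CAPACITY_SHIFT) — an illustration, not the kernel source:

/*
 * Illustration of the capacity scaling applied to util_sum above:
 * a contribution is multiplied by the CPU's capacity and normalized
 * by SCHED_CAPACITY_SCALE, so a CPU at half capacity accrues half
 * the utilization for the same running time.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/* Assumed shape of the scale() helper the hunks call. */
static unsigned long scale(unsigned long v, unsigned long s)
{
	return v * s >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
	unsigned long delta = 1024;	/* one full accounting period */

	/* Full-capacity CPU vs. a CPU rated at half capacity. */
	printf("full capacity: %lu\n", scale(delta, 1024));	/* 1024 */
	printf("half capacity: %lu\n", scale(delta, 512));	/* 512 */
	return 0;
}

The effect is that a task running continuously on a lower-capacity CPU accumulates proportionally less util_sum, which is what makes the utilization signal comparable across CPUs of different compute capacity.
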
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c0726d5fd6a3..167ab4844ee6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1398,7 +1398,7 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 static __always_inline
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
 		return sd->smt_gain / sd->span_weight;
 
 	return SCHED_CAPACITY_SCALE;
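
The sd check added here pairs with the new call site in __update_load_avg(), which passes NULL for the sched_domain: without the guard, every load-tracking update would dereference a NULL pointer. A self-contained sketch of the guarded logic (illustrative only; the flag value and the smt_gain figure below are stand-ins, not taken from this diff):

#include <stddef.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL
#define SD_SHARE_CPUCAPACITY	0x0001	/* hypothetical flag value */

struct sched_domain {
	int flags;
	unsigned int span_weight;	/* CPUs spanned by this domain */
	unsigned int smt_gain;		/* capacity budget for SMT siblings */
};

/* Mirror of the patched helper: NULL now selects the default capacity. */
static unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	(void)cpu;

	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}

int main(void)
{
	struct sched_domain smt = {
		.flags		= SD_SHARE_CPUCAPACITY,
		.span_weight	= 2,	/* two hardware threads */
		.smt_gain	= 1178,	/* stand-in SMT capacity budget */
	};

	/* The new caller in __update_load_avg() passes NULL. */
	printf("no domain: %lu\n", arch_scale_cpu_capacity(NULL, 0));	/* 1024 */
	printf("SMT pair:  %lu\n", arch_scale_cpu_capacity(&smt, 0));	/* 589 */
	return 0;
}
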