author     Peter Zijlstra <peterz@infradead.org>    2017-08-24 17:38:30 +0200
committer  Ingo Molnar <mingo@kernel.org>           2017-09-29 19:35:13 +0200
commit     b5b3e35f4149df72aaba612bba195fb2ec37b1b1 (patch)
tree       9149ef6897523191337778d1452253bae75c366c /kernel/sched
parent     sched/fair: Move enqueue migrate handling (diff)
sched/fair: Rename {en,de}queue_entity_load_avg()
Since they're now purely about runnable_load, rename them.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
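For quick reference, this is how the renamed pair reads after the patch. It is a condensed view assembled from the hunks below, not new code; sub_positive() is the existing fair.c helper that subtracts while clamping the result at zero.

/* Add the load generated by se into cfs_rq's load average */
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->runnable_load_avg += se->avg.load_avg;
	cfs_rq->runnable_load_sum += se_weight(se) * se->avg.load_sum;
}

/* Remove the runnable load generated by se from cfs_rq's runnable load average */
static inline void
dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	sub_positive(&cfs_rq->runnable_load_avg, se->avg.load_avg);
	sub_positive(&cfs_rq->runnable_load_sum, se_weight(se) * se->avg.load_sum);
}

Both helpers touch only cfs_rq->runnable_load_{avg,sum}, never the full load average, which is what motivates the new names.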
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 605b7c3e1ab8..fad77c8454e4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3549,7 +3549,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 /* Add the load generated by se into cfs_rq's load average */
 static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	cfs_rq->runnable_load_avg += se->avg.load_avg;
 	cfs_rq->runnable_load_sum += se_weight(se) * se->avg.load_sum;
@@ -3557,7 +3557,7 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
 static inline void
-dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	sub_positive(&cfs_rq->runnable_load_avg, se->avg.load_avg);
 	sub_positive(&cfs_rq->runnable_load_sum, se_weight(se) * se->avg.load_sum);
@@ -3650,9 +3650,9 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 }
 static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
-dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 static inline void
@@ -3798,7 +3798,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	 * - Add its new weight to cfs_rq->load.weight
	 */
 	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
-	enqueue_entity_load_avg(cfs_rq, se);
+	enqueue_runnable_load_avg(cfs_rq, se);
 	update_cfs_shares(se);
 	account_entity_enqueue(cfs_rq, se);
@@ -3882,7 +3882,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	 * of its group cfs_rq.
	 */
 	update_load_avg(cfs_rq, se, UPDATE_TG);
-	dequeue_entity_load_avg(cfs_rq, se);
+	dequeue_runnable_load_avg(cfs_rq, se);
 	update_stats_dequeue(cfs_rq, se, flags);