author     Peter Zijlstra <peterz@infradead.org>  2023-09-15 00:48:55 +0200
committer  Peter Zijlstra <peterz@infradead.org>  2023-10-03 12:32:29 +0200
commit     2f2fc17bab0011430ceb6f2dc1959e7d1f981444 (patch)
tree       6146c97ac7473f528de944f729fc65a0b2259e63 /kernel/sched/fair.c
parent     Linux 6.6-rc4 (diff)
download   linux-2f2fc17bab0011430ceb6f2dc1959e7d1f981444.tar.xz
           linux-2f2fc17bab0011430ceb6f2dc1959e7d1f981444.zip
sched/eevdf: Also update slice on placement
Tasks that never consume their full slice would not update their slice
value. This means that tasks that are spawned before the sysctl scaling
keep their original (UP) slice length.

Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230915124822.847197830@noisy.programming.kicks-ass.net
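A quick way to see the failure mode: the base slice starts out at its
uniprocessor value and is scaled up later during boot, once the number of
CPUs is known, so any task whose slice was seeded before that point keeps
the smaller value unless something reloads it. The user-space sketch below
models that; the names mirror the kernel's, but the default value and the
logarithmic scaling factor are illustrative assumptions, not the kernel
code itself.

/* Standalone sketch of the stale-slice problem; not kernel code. */
#include <stdio.h>

/* Illustrative "UP" default, in nanoseconds. */
static unsigned long long sysctl_sched_base_slice = 750000ULL;

struct entity {
	unsigned long long slice;
};

/*
 * Rough model of the boot-time sysctl scaling: multiply the base slice
 * by roughly 1 + log2(nr_cpus), mimicking logarithmic tunable scaling.
 */
static void scale_sysctl(unsigned int nr_cpus)
{
	unsigned int factor = 1;

	while (nr_cpus >>= 1)
		factor++;
	sysctl_sched_base_slice *= factor;
}

/* Before the patch: the slice is only seeded once, at task creation. */
static void spawn(struct entity *se)
{
	se->slice = sysctl_sched_base_slice;
}

/* After the patch: placement re-reads the sysctl every time. */
static void place_entity(struct entity *se)
{
	se->slice = sysctl_sched_base_slice;
}

int main(void)
{
	struct entity early_task;

	spawn(&early_task);	/* spawned before the sysctl scaling */
	scale_sysctl(8);	/* scaling happens later during boot */

	printf("stale (UP) slice: %llu ns\n", early_task.slice);
	place_entity(&early_task);	/* the fix: refresh on placement */
	printf("refreshed slice:  %llu ns\n", early_task.slice);
	return 0;
}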
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  6
1 file changed, 4 insertions, 2 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cb225921bbca..7d73652acbb2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4919,10 +4919,12 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	u64 vslice = calc_delta_fair(se->slice, se);
-	u64 vruntime = avg_vruntime(cfs_rq);
+	u64 vslice, vruntime = avg_vruntime(cfs_rq);
 	s64 lag = 0;
 
+	se->slice = sysctl_sched_base_slice;
+	vslice = calc_delta_fair(se->slice, se);
+
 	/*
 	 * Due to how V is constructed as the weighted average of entities,
 	 * adding tasks with positive lag, or removing tasks with negative lag
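For reference, the prologue of place_entity() with the patch applied,
reconstructed from the hunk above; the comments are added here for
explanation and are not in the kernel source, and the rest of the function
is elided where the hunk is truncated.

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	u64 vslice, vruntime = avg_vruntime(cfs_rq);
	s64 lag = 0;

	/* Always refresh the slice from the current sysctl value ... */
	se->slice = sysctl_sched_base_slice;
	/* ... and only then convert it into virtual time for this entity. */
	vslice = calc_delta_fair(se->slice, se);

	/* remainder of the function unchanged and elided, as in the hunk above */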