author     Shang XiaoJing <shangxiaojing@huawei.com>  2022-08-26 12:00:37 +0200
committer  Peter Zijlstra <peterz@infradead.org>      2022-09-01 11:19:54 +0200
commit     96458e7f7dc5ad14bd7577cbf1638e1504ad79dd (patch)
tree       6ff8263bac2af987f82ffa956ab7a2368e64654a /kernel/sched/deadline.c
parent     sched/deadline: Add dl_task_is_earliest_deadline helper (diff)
download   linux-96458e7f7dc5ad14bd7577cbf1638e1504ad79dd.tar.xz
           linux-96458e7f7dc5ad14bd7577cbf1638e1504ad79dd.zip
sched/deadline: Add replenish_dl_new_period helper
Wrap the repeated code in a new helper, replenish_dl_new_period(), which sets the deadline and runtime of the input dl_se based on pi_of(dl_se). Note that setup_new_dl_entity() originally set the deadline and runtime based on dl_se itself, which should equal pi_of(dl_se) for a non-boosted task.

Signed-off-by: Shang XiaoJing <shangxiaojing@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Link: https://lore.kernel.org/r/20220826100037.12146-1-shangxiaojing@huawei.com
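Below is a minimal, standalone sketch (not kernel code) of what the helper does: the struct layout, the pi_se pointer, and the stubbed rq_clock() are simplifications assumed for illustration only, not the definitions used in kernel/sched/deadline.c. The helper simply restarts an entity's current instance from the (possibly inherited) dl_deadline/dl_runtime parameters.

#include <stdio.h>

typedef unsigned long long u64;

struct rq {
	u64 clock;			/* stand-in for the runqueue clock */
};

struct sched_dl_entity {
	u64 dl_runtime;			/* reserved runtime per instance */
	u64 dl_deadline;		/* relative deadline of each instance */
	u64 runtime;			/* remaining runtime of the current instance */
	u64 deadline;			/* absolute deadline of the current instance */
	struct sched_dl_entity *pi_se;	/* donor entity when boosted; self otherwise (simplified) */
};

static u64 rq_clock(struct rq *rq)
{
	return rq->clock;
}

/* For a non-boosted task, pi_of(dl_se) == dl_se. */
static struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se->pi_se;
}

/* The factored-out helper: start a new instance from the donor's parameters. */
static void replenish_dl_new_period(struct sched_dl_entity *dl_se, struct rq *rq)
{
	dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
	dl_se->runtime = pi_of(dl_se)->dl_runtime;
}

int main(void)
{
	struct rq rq = { .clock = 1000 };
	struct sched_dl_entity se = { .dl_runtime = 30, .dl_deadline = 100 };

	se.pi_se = &se;			/* not boosted: inherit from itself */
	replenish_dl_new_period(&se, &rq);

	/* Prints: deadline=1100 runtime=30 */
	printf("deadline=%llu runtime=%llu\n", se.deadline, se.runtime);
	return 0;
}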
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--  kernel/sched/deadline.c  23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 34271aff4712..3bf4b12ec5b7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -770,6 +770,14 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
+static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
+ struct rq *rq)
+{
+ /* for non-boosted task, pi_of(dl_se) == dl_se */
+ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
+ dl_se->runtime = pi_of(dl_se)->dl_runtime;
+}
+
/*
* We are being explicitly informed that a new instance is starting,
* and this means that:
@@ -803,8 +811,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
* future; in fact, we must consider execution overheads (time
* spent on hardirq context, etc.).
*/
- dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
- dl_se->runtime = dl_se->dl_runtime;
+ replenish_dl_new_period(dl_se, rq);
}
/*
@@ -836,10 +843,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
* This could be the case for a !-dl task that is boosted.
* Just go with full inherited parameters.
*/
- if (dl_se->dl_deadline == 0) {
- dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
- dl_se->runtime = pi_of(dl_se)->dl_runtime;
- }
+ if (dl_se->dl_deadline == 0)
+ replenish_dl_new_period(dl_se, rq);
if (dl_se->dl_yielded && dl_se->runtime > 0)
dl_se->runtime = 0;
@@ -866,8 +871,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
printk_deferred_once("sched: DL replenish lagged too much\n");
- dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
- dl_se->runtime = pi_of(dl_se)->dl_runtime;
+ replenish_dl_new_period(dl_se, rq);
}
if (dl_se->dl_yielded)
@@ -1024,8 +1028,7 @@ static void update_dl_entity(struct sched_dl_entity *dl_se)
return;
}
- dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
- dl_se->runtime = pi_of(dl_se)->dl_runtime;
+ replenish_dl_new_period(dl_se, rq);
}
}