| author | Dietmar Eggemann <dietmar.eggemann@arm.com> | 2022-03-02 19:34:29 +0100 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2022-03-08 16:08:39 +0100 |
| commit | f1304ecbef3c9f4aec119ce2a07335d3a0bc55a6 | |
| tree | 0fac490f0aa5bff5950657c638073af30955424e /kernel/sched/deadline.c | |
| parent | sched/deadline: Remove unused def_dl_bandwidth | |
sched/deadline: Move bandwidth mgmt and reclaim functions into sched class source file
Move the deadline bandwidth management (admission control) functions
__dl_add(), __dl_sub() and __dl_overflow(), as well as the bandwidth
reclaim function __dl_update(), from the private task scheduler header
file to the deadline sched class source file.

The functions are only used internally, so they don't have to be
exported.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20220302183433.333029-3-dietmar.eggemann@arm.com
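
For context on what is being moved: __dl_add()/__dl_sub() maintain the running sum of admitted bandwidth in dl_b->total_bw, and __dl_overflow() is the admission test that compares that sum against the allowed fraction of the root domain's CPU capacity. The sketch below models that test in plain userspace C; the BW_SHIFT/SCHED_CAPACITY_SHIFT constants and the to_ratio()/cap_scale() conventions follow the kernel's, while the structs and main() are simplified stand-ins for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT                20      /* bandwidth = runtime/period in Q20 fixed point */
#define SCHED_CAPACITY_SHIFT    10      /* one full CPU == 1 << 10 capacity units */

/* scale bandwidth @v by CPU capacity @cap, like the kernel's cap_scale() */
#define cap_scale(v, cap)       (((v) * (cap)) >> SCHED_CAPACITY_SHIFT)

struct dl_bw {
        uint64_t bw;            /* allowed bandwidth per CPU; (u64)-1 == no limit */
        uint64_t total_bw;      /* sum of all admitted task bandwidths */
};

/* runtime/period as a Q20 ratio, like the kernel's to_ratio() */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

/* mirrors __dl_overflow(): would swapping old_bw for new_bw exceed the limit? */
static int dl_overflow(struct dl_bw *dl_b, unsigned long cap,
                       uint64_t old_bw, uint64_t new_bw)
{
        return dl_b->bw != (uint64_t)-1 &&
               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}

int main(void)
{
        /* 95% per-CPU limit, as with the default rt_runtime/rt_period = 950000/1000000 us */
        struct dl_bw dl_b = { .bw = to_ratio(1000000, 950000), .total_bw = 0 };
        unsigned long cap = 4 << SCHED_CAPACITY_SHIFT;  /* 4 full-capacity CPUs */

        /* candidate task: 10 ms runtime every 30 ms period (values in ns) */
        uint64_t tsk_bw = to_ratio(30000000, 10000000);

        if (!dl_overflow(&dl_b, cap, 0, tsk_bw))
                dl_b.total_bw += tsk_bw;        /* the accounting half of __dl_add() */

        printf("total_bw=%llu, limit=%llu\n",
               (unsigned long long)dl_b.total_bw,
               (unsigned long long)cap_scale(dl_b.bw, cap));
        return 0;
}
```

Since bandwidths are Q20 fixed-point ratios of runtime over period, the comparison stays in integer arithmetic throughout.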
Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r-- kernel/sched/deadline.c | 44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ed4251fa87c7..81bf97648e42 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -128,6 +128,21 @@ static inline bool dl_bw_visited(int cpu, u64 gen)
 	rd->visit_gen = gen;
 	return false;
 }
+
+static inline
+void __dl_update(struct dl_bw *dl_b, s64 bw)
+{
+	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
+	int i;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+			 "sched RCU must be held");
+	for_each_cpu_and(i, rd->span, cpu_active_mask) {
+		struct rq *rq = cpu_rq(i);
+
+		rq->dl.extra_bw += bw;
+	}
+}
 #else
 static inline struct dl_bw *dl_bw_of(int i)
 {
@@ -148,9 +163,38 @@ static inline bool dl_bw_visited(int cpu, u64 gen)
 {
 	return false;
 }
+
+static inline
+void __dl_update(struct dl_bw *dl_b, s64 bw)
+{
+	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
+
+	dl->extra_bw += bw;
+}
 #endif
 
 static inline
+void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
+{
+	dl_b->total_bw -= tsk_bw;
+	__dl_update(dl_b, (s32)tsk_bw / cpus);
+}
+
+static inline
+void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
+{
+	dl_b->total_bw += tsk_bw;
+	__dl_update(dl_b, -((s32)tsk_bw / cpus));
+}
+
+static inline bool
+__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
+{
+	return dl_b->bw != -1 &&
+	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
+}
+
+static inline
 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 {
 	u64 old = dl_rq->running_bw;
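
One detail worth spelling out in the two __dl_update() variants above: on SMP the dl_bw sits inside the root_domain, so the signed per-CPU share passed in by __dl_add()/__dl_sub() is applied to every active runqueue in the domain's span, whereas the !CONFIG_SMP variant credits the single dl_rq directly. A hedged sketch of the SMP distribution, with plain arrays standing in for root_domain/rq (all names here are illustrative, not kernel API):

```c
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* illustrative stand-ins for the kernel's struct dl_rq / struct rq */
struct dl_rq { int64_t extra_bw; };
struct rq    { struct dl_rq dl; };

static struct rq runqueues[NR_CPUS];
static int cpu_active[NR_CPUS] = { 1, 1, 1, 1 };

/*
 * Models the SMP __dl_update(): walk the root domain's span and apply
 * the (signed) per-CPU bandwidth share to every active runqueue.
 */
static void dl_update(int64_t bw)
{
        for (int i = 0; i < NR_CPUS; i++)
                if (cpu_active[i])
                        runqueues[i].dl.extra_bw += bw;
}

int main(void)
{
        int64_t tsk_bw = 349525;        /* ~1/3 of a CPU in Q20 fixed point */

        dl_update(-(tsk_bw / NR_CPUS)); /* __dl_add(): admitted bw is not reclaimable */
        dl_update(tsk_bw / NR_CPUS);    /* __dl_sub(): the task left, bw is back */

        for (int i = 0; i < NR_CPUS; i++)
                printf("cpu%d extra_bw=%lld\n", i,
                       (long long)runqueues[i].dl.extra_bw);
        return 0;
}
```

The sign convention follows the patch: admitting a task (__dl_add) subtracts its per-CPU share from extra_bw, since bandwidth handed to a new task is no longer reclaimable, and releasing it (__dl_sub) adds the share back.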