author     Hui Su <sh_def@163.com>                   2020-10-30 18:32:23 +0100
committer  Peter Zijlstra <peterz@infradead.org>     2021-01-14 11:20:11 +0100
commit     65bcf072e20ed7597caa902f170f293662b0af3c
tree       9f38c6fa85339a674e748e3391e94931ea2b3998 /kernel
parent     sched/fair: Reduce cases for active balance
sched: Use task_current() instead of 'rq->curr == p'
Use the task_current() function where appropriate.
No functional change.
Signed-off-by: Hui Su <sh_def@163.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Link: https://lkml.kernel.org/r/20201030173223.GA52339@rlk
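
For context, task_current() is the existing helper in kernel/sched/sched.h that wraps exactly this comparison, which is why the substitutions below are purely cosmetic. A sketch of the helper as it reads around this point in the tree (exact placement and return type may differ slightly):

/* kernel/sched/sched.h (sketch): true if @p is the task @rq is currently running */
static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}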
Diffstat (limited to 'kernel')
 kernel/sched/deadline.c | 2 +-
 kernel/sched/debug.c    | 2 +-
 kernel/sched/fair.c     | 6 +++---
 kernel/sched/rt.c       | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 75686c6d4436..5421782fe897 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2514,7 +2514,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 			    int oldprio)
 {
-	if (task_on_rq_queued(p) || rq->curr == p) {
+	if (task_on_rq_queued(p) || task_current(rq, p)) {
 #ifdef CONFIG_SMP
 		/*
 		 * This might be too much, but unfortunately
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2357921580f9..486f403a778b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -486,7 +486,7 @@ static char *task_group_path(struct task_group *tg)
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
-	if (rq->curr == p)
+	if (task_current(rq, p))
 		SEQ_printf(m, ">R");
 	else
 		SEQ_printf(m, " %c", task_state_to_char(p));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 53802b77ce64..197a51473e0c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5430,7 +5430,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		s64 delta = slice - ran;
 
 		if (delta < 0) {
-			if (rq->curr == p)
+			if (task_current(rq, p))
 				resched_curr(rq);
 			return;
 		}
@@ -10829,7 +10829,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 	 * our priority decreased, or if we are not currently running on
 	 * this runqueue and our priority is higher than the current's
 	 */
-	if (rq->curr == p) {
+	if (task_current(rq, p)) {
 		if (p->prio > oldprio)
 			resched_curr(rq);
 	} else
@@ -10962,7 +10962,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 		 * kick off the schedule if running, otherwise just see
 		 * if we can still preempt the current task.
 		 */
-		if (rq->curr == p)
+		if (task_current(rq, p))
 			resched_curr(rq);
 		else
 			check_preempt_curr(rq, p, 0);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index dbe4629cf7ba..8f720b71d13d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2357,7 +2357,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 	if (!task_on_rq_queued(p))
 		return;
 
-	if (rq->curr == p) {
+	if (task_current(rq, p)) {
 #ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we