summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--kernel/sched_rt.c20
1 files changed, 16 insertions, 4 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 87d7b3ff3861..9becc3710b60 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -160,11 +160,23 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
struct rq *rq = task_rq(p);
/*
- * If the task will not preempt the RQ, try to find a better RQ
- * before we even activate the task
+ * If the current task is an RT task, then
+ * try to see if we can wake this RT task up on another
+ * runqueue. Otherwise simply start this RT task
+ * on its current runqueue.
+ *
+ * We want to avoid overloading runqueues, even if
+ * the RT task is of higher priority than the current RT task.
+ * RT tasks behave differently than other tasks. If
+ * one gets preempted, we try to push it off to another queue.
+ * So trying to keep a preempting RT task on the same
+ * cache hot CPU will force the running RT task to
+ * a cold CPU. So we waste all the cache for the lower
+ * RT task in hopes of saving some of an RT task
+ * that is just being woken and probably will have
+ * cold cache anyway.
*/
- if ((p->prio >= rq->rt.highest_prio)
- && (p->nr_cpus_allowed > 1)) {
+ if (unlikely(rt_task(rq->curr))) {
int cpu = find_lowest_rq(p);
return (cpu == -1) ? task_cpu(p) : cpu;