author		Peter Zijlstra <peterz@infradead.org>	2017-12-21 15:06:50 +0100
committer	Ingo Molnar <mingo@kernel.org>	2018-03-09 07:59:19 +0100
commit		00357f5ec5d67a52a175da6f29f85c2c19d59bc8
tree		53799544850786339539ed2d7f39ff03a1f321b1 /kernel/sched/fair.c
parent		sched/fair: Update blocked load from NEWIDLE
sched/nohz: Clean up nohz enter/exit
The primary observation is that nohz enter/exit is always from the
current CPU, therefore NOHZ_TICK_STOPPED does not in fact need to be
an atomic.

Secondary is that we appear to have 2 nearly identical hooks in the
nohz enter code, set_cpu_sd_state_idle() and nohz_balance_enter_idle().
Fold the whole set_cpu_sd_state thing into
nohz_balance_{enter,exit}_idle.

Removes an atomic op from both enter and exit paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
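To illustrate the first observation, here is a minimal sketch of the
pattern the patch applies (a toy model, not the kernel code itself;
every toy_* name is invented): a flag that lives in a per-CPU runqueue
and is only ever written by the CPU that owns it can use plain loads
and stores, while shared state that all CPUs touch, such as the idle
cpumask and its counter, still needs atomics.

	/* Toy model of the per-CPU flag; all toy_* names are invented. */
	struct toy_rq {
		int cpu;
		int nohz_tick_stopped;	/* written only by the owning CPU */
	};

	static void toy_balance_enter_idle(struct toy_rq *rq)
	{
		if (rq->nohz_tick_stopped)	/* plain read: we run on rq->cpu */
			return;

		rq->nohz_tick_stopped = 1;	/* plain write replaces atomic_or() */

		/*
		 * Shared state such as nohz.idle_cpus_mask and nohz.nr_cpus
		 * is touched by every CPU, so it keeps its atomic operations.
		 */
	}

	static void toy_balance_exit_idle(struct toy_rq *rq)
	{
		if (!rq->nohz_tick_stopped)	/* common case: tick never stopped */
			return;

		rq->nohz_tick_stopped = 0;	/* plain write replaces atomic_andnot() */
	}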
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	73
1 file changed, 37 insertions(+), 36 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 85232dad89c9..494d5db9a6cd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9103,23 +9103,6 @@ static inline int find_new_ilb(void)
 	return nr_cpu_ids;
 }
 
-static inline void set_cpu_sd_state_busy(void)
-{
-	struct sched_domain *sd;
-	int cpu = smp_processor_id();
-
-	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_llc, cpu));
-
-	if (!sd || !sd->nohz_idle)
-		goto unlock;
-	sd->nohz_idle = 0;
-
-	atomic_inc(&sd->shared->nr_busy_cpus);
-unlock:
-	rcu_read_unlock();
-}
-
 /*
  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
@@ -9175,8 +9158,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	 * We may be recently in ticked or tickless idle mode. At the first
 	 * busy tick after returning from idle, we will update the busy stats.
 	 */
-	set_cpu_sd_state_busy();
-	nohz_balance_exit_idle(cpu);
+	nohz_balance_exit_idle(rq);
 
 	/*
 	 * None are in tickless mode and hence no need for NOHZ idle load
@@ -9240,27 +9222,39 @@ out:
 	kick_ilb(flags);
 }
 
-void nohz_balance_exit_idle(unsigned int cpu)
+static void set_cpu_sd_state_busy(int cpu)
 {
-	unsigned int flags = atomic_read(nohz_flags(cpu));
+	struct sched_domain *sd;
 
-	if (unlikely(flags & NOHZ_TICK_STOPPED)) {
-		/*
-		 * Completely isolated CPUs don't ever set, so we must test.
-		 */
-		if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
-			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-			atomic_dec(&nohz.nr_cpus);
-		}
+	rcu_read_lock();
+	sd = rcu_dereference(per_cpu(sd_llc, cpu));
 
-		atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-	}
+	if (!sd || !sd->nohz_idle)
+		goto unlock;
+	sd->nohz_idle = 0;
+
+	atomic_inc(&sd->shared->nr_busy_cpus);
+unlock:
+	rcu_read_unlock();
 }
 
-void set_cpu_sd_state_idle(void)
+void nohz_balance_exit_idle(struct rq *rq)
+{
+	SCHED_WARN_ON(rq != this_rq());
+
+	if (likely(!rq->nohz_tick_stopped))
+		return;
+
+	rq->nohz_tick_stopped = 0;
+	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
+	atomic_dec(&nohz.nr_cpus);
+
+	set_cpu_sd_state_busy(rq->cpu);
+}
+
+static void set_cpu_sd_state_idle(int cpu)
 {
 	struct sched_domain *sd;
-	int cpu = smp_processor_id();
 
 	rcu_read_lock();
 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
@@ -9280,6 +9274,10 @@ unlock:
  */
 void nohz_balance_enter_idle(int cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+
+	SCHED_WARN_ON(cpu != smp_processor_id());
+
 	/* If this CPU is going down, then nothing needs to be done: */
 	if (!cpu_active(cpu))
 		return;
@@ -9288,16 +9286,19 @@ void nohz_balance_enter_idle(int cpu)
 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
 		return;
 
-	if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
+	if (rq->nohz_tick_stopped)
 		return;
 
 	/* If we're a completely isolated CPU, we don't play: */
-	if (on_null_domain(cpu_rq(cpu)))
+	if (on_null_domain(rq))
 		return;
 
+	rq->nohz_tick_stopped = 1;
+
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 	atomic_inc(&nohz.nr_cpus);
-	atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+	set_cpu_sd_state_idle(cpu);
 }
 #else
 static inline void nohz_balancer_kick(struct rq *rq) { }