path: root/kernel/sched/sched.h
author    Li Zefan <lizefan@huawei.com>  2013-03-05 09:06:23 +0100
committer Ingo Molnar <mingo@kernel.org>  2013-03-06 11:24:31 +0100
commit    5e6521eaa1ee581a13b904f35b80c5efeb2baccb (patch)
tree      4a8e82ba57da872636ff432edc036914163249e5 /kernel/sched/sched.h
parent    sched: Move SCHED_LOAD_SHIFT macros to kernel/sched/sched.h (diff)
sched: Move struct sched_group to kernel/sched/sched.h
Move struct sched_group_power and struct sched_group and the related
inline functions to kernel/sched/sched.h, as they are used internally only.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/5135A77F.2010705@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  56
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 709a30cdfd85..1a4a2b19c2f4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -572,6 +572,62 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);
+struct sched_group_power {
+ atomic_t ref;
+ /*
+ * CPU power of this group, SCHED_LOAD_SCALE being max power for a
+ * single CPU.
+ */
+ unsigned int power, power_orig;
+ unsigned long next_update;
+ /*
+ * Number of busy cpus in this group.
+ */
+ atomic_t nr_busy_cpus;
+
+ unsigned long cpumask[0]; /* iteration mask */
+};
+
+struct sched_group {
+ struct sched_group *next; /* Must be a circular list */
+ atomic_t ref;
+
+ unsigned int group_weight;
+ struct sched_group_power *sgp;
+
+ /*
+ * The CPUs this group covers.
+ *
+ * NOTE: this field is variable length. (Allocated dynamically
+ * by attaching extra space to the end of the structure,
+ * depending on how many CPUs the kernel has booted up with)
+ */
+ unsigned long cpumask[0];
+};
+
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+ return to_cpumask(sg->cpumask);
+}
+
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+ return to_cpumask(sg->sgp->cpumask);
+}
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+ return cpumask_first(sched_group_cpus(group));
+}
+
extern int group_balance_cpu(struct sched_group *sg);
#endif /* CONFIG_SMP */
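
For context (not part of the patch): the zero-length cpumask[0] members
above are flexible-array-style tails, so both structures are allocated
with extra room for the boot-time cpumask, and the groups of a domain
form a circular list walked via ->next. Below is a minimal illustrative
sketch of both patterns; the function names alloc_group() and
walk_domain_groups() are hypothetical, while the fields and accessors
come from the declarations in the diff.

/*
 * Illustrative sketch only. alloc_group() shows the variable-length
 * allocation the NOTE in the diff refers to: the struct is
 * over-allocated so the trailing cpumask[0] has room for the real
 * cpumask. walk_domain_groups() uses the accessors moved into
 * kernel/sched/sched.h. Both function names are hypothetical.
 */
static struct sched_group *alloc_group(int node)
{
	/* sizeof(struct sched_group) + space for the trailing cpumask */
	return kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			    GFP_KERNEL, node);
}

static void walk_domain_groups(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	do {
		pr_info("group: first cpu %u, weight %u, cpus %u\n",
			group_first_cpu(sg), sg->group_weight,
			cpumask_weight(sched_group_cpus(sg)));
		sg = sg->next;		/* ->next is a circular list */
	} while (sg != sd->groups);
}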