author	Ingo Molnar <mingo@kernel.org>	2013-05-28 08:15:45 +0200
committer	Ingo Molnar <mingo@kernel.org>	2013-05-28 08:16:02 +0200
commit	d07e75a6e0e8582bdecefe8868b0bfbdf2ee7085 (patch)
tree	c72e23472f84b73c5c1424af426be11c44bc0a92 /kernel/sched/fair.c
parent	sched: Use this_rq() helper (diff)
parent	sched: Move update_load_*() methods from sched.h to fair.c (diff)
Merge branch 'sched/cleanups' into sched/core
Merge reason: these bits, formerly in sched/urgent, are too late for v3.10.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f2c9c0c3406c..f62b16dfba63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -113,6 +113,24 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+ lw->weight += inc;
+ lw->inv_weight = 0;
+}
+
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+ lw->weight -= dec;
+ lw->inv_weight = 0;
+}
+
+static inline void update_load_set(struct load_weight *lw, unsigned long w)
+{
+ lw->weight = w;
+ lw->inv_weight = 0;
+}
+
 /*
  * Increase the granularity value when there are more CPUs,
  * because with more CPUs the 'effective latency' as visible
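
To see what the moved helpers actually do, here is a minimal standalone sketch (not kernel code): the struct below is a simplified stand-in for the kernel's struct load_weight, and the value 1024 stands in for the nice-0 load weight. The pattern to notice is that every change to ->weight zeroes ->inv_weight, invalidating the cached fixed-point inverse so the fair-share arithmetic recomputes it lazily the next time it is needed.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct load_weight: weight is the
 * accumulated load, inv_weight caches a fixed-point inverse of it
 * (0 means "stale, recompute before use"). */
struct load_weight {
	unsigned long weight;
	unsigned int inv_weight;
};

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;	/* invalidate the cached inverse */
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

int main(void)
{
	struct load_weight rq_load = { 0, 0 };

	/* Two entities enqueued at nice-0 weight (1024 in the kernel). */
	update_load_add(&rq_load, 1024);
	update_load_add(&rq_load, 1024);
	printf("enqueue x2: weight=%lu inv_weight=%u\n",
	       rq_load.weight, rq_load.inv_weight);

	/* One entity dequeued. */
	update_load_sub(&rq_load, 1024);
	printf("dequeue x1: weight=%lu inv_weight=%u\n",
	       rq_load.weight, rq_load.inv_weight);

	return 0;
}

Zeroing inv_weight instead of recomputing it keeps these hot enqueue/dequeue paths to a couple of stores; the division needed to rebuild the inverse is deferred until a weighted delta is actually computed.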