author     Alexey Dobriyan <adobriyan@sw.ru>    2007-10-15 17:00:13 +0200
committer  Ingo Molnar <mingo@elte.hu>          2007-10-15 17:00:13 +0200
commit     a9957449b08ab561a33e1e038df06843b8d8dd9f
tree       066272181b3d563f8029c99c8c5587d04597ffda /kernel
parent     sched: tweak wakeup granularity
sched: uninline scheduler
* save ~300 bytes
* activate_idle_task() was moved to avoid a warning
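The two bullets above are easier to see in a minimal stand-alone sketch (toy code, not from this patch; the helper names, the USE_SCALE macro and the gcc flags are made up): dropping "inline" trades a duplicated body in every caller for one shared out-of-line copy, and it turns a quietly ignored unused helper into a "defined but not used" warning unless the definition sits next to a caller that is actually compiled in.

/* sketch.c -- a toy translation unit, not kernel code.
 * Build with: gcc -c -Wall sketch.c   (and again with -DUSE_SCALE)
 */

/* An unused "static inline" helper is quietly discarded, and when the
 * compiler honors the hint each caller carries its own expanded copy of
 * the body instead of a call.
 */
static inline int scale_inline(int x)
{
        return x * 2 + 1;
}

/* The same helper without "inline" becomes one shared out-of-line body,
 * which is why the callers in the table below shrink.  But if no caller
 * in this file survives preprocessing, gcc -Wall reports
 * "'scale' defined but not used"; presumably that is the kind of warning
 * that forced activate_idle_task() to move next to its conditionally
 * compiled caller.
 */
static int scale(int x)
{
        return x * 2 + 1;
}

#ifdef USE_SCALE                /* stand-in for a config option */
int use_scale(int a, int b)
{
        return scale(a) + scale(b);
}
#endif

int use_scale_inline(int a, int b)
{
        return scale_inline(a) + scale_inline(b);
}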
bloat-o-meter output:
add/remove: 6/0 grow/shrink: 0/16 up/down: 438/-733 (-295) <===
function                        old    new   delta
__enqueue_entity                  -    165    +165
finish_task_switch                -    110    +110
update_curr_rt                    -     79     +79
__load_balance_iterator           -     32     +32
__task_rq_unlock                  -     28     +28
find_process_by_pid               -     24     +24
do_sched_setscheduler           133    123     -10
sys_sched_rr_get_interval       176    165     -11
sys_sched_getparam              156    145     -11
normalize_rt_tasks              482    470     -12
sched_getaffinity               112     99     -13
sys_sched_getscheduler           86     72     -14
sched_setaffinity               226    212     -14
sched_setscheduler              666    642     -24
load_balance_start_fair          33      9     -24
load_balance_next_fair           33      9     -24
dequeue_task_rt                 133     67     -66
put_prev_task_rt                 97     28     -69
schedule_tail                   133     50     -83
schedule                        682    594     -88
enqueue_entity                  499    366    -133
task_new_fair                   317    180    -137
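The table above is bloat-o-meter output, i.e. scripts/bloat-o-meter from the kernel tree run against the vmlinux images built before and after the patch (presumably something like ./scripts/bloat-o-meter vmlinux.old vmlinux.new; the file names here are only an illustration). The six added symbols are helpers that previously disappeared into their callers and now exist out of line, and the sixteen shrinking functions are those callers, which now emit a call instead of an inlined copy.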
Signed-off-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c        | 44
-rw-r--r--  kernel/sched_fair.c   |  2
-rw-r--r--  kernel/sched_rt.c     |  2
3 files changed, 24 insertions, 24 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 4f13d379bea5..ce9bb7aa7c12 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -608,7 +608,7 @@ repeat_lock_task:
 	return rq;
 }
 
-static inline void __task_rq_unlock(struct rq *rq)
+static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
 	spin_unlock(&rq->lock);
@@ -623,7 +623,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
-static inline struct rq *this_rq_lock(void)
+static struct rq *this_rq_lock(void)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
@@ -986,20 +986,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 }
 
 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	update_rq_clock(rq);
-
-	if (p->state == TASK_UNINTERRUPTIBLE)
-		rq->nr_uninterruptible--;
-
-	enqueue_task(rq, p, 0);
-	inc_nr_running(p, rq);
-}
-
-/*
  * deactivate_task - remove a task from the runqueue.
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
@@ -1206,7 +1192,7 @@ void kick_process(struct task_struct *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static unsigned long source_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1221,7 +1207,7 @@ static inline unsigned long source_load(int cpu, int type)
  * Return a high guess at the load of a migration-target cpu weighted
  * according to the scheduling class and "nice" value.
  */
-static inline unsigned long target_load(int cpu, int type)
+static unsigned long target_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long total = weighted_cpuload(cpu);
@@ -1813,7 +1799,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
@@ -3020,7 +3006,7 @@ static DEFINE_SPINLOCK(balancing);
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -4140,7 +4126,7 @@ struct task_struct *idle_task(int cpu)
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
@@ -5157,6 +5143,20 @@ static void migrate_live_tasks(int src_cpu)
 }
 
 /*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+	update_rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, 0);
+	inc_nr_running(p, rq);
+}
+
+/*
  * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
  * the _front_ of the runqueue. Used by CPU offline code.
@@ -6494,7 +6494,7 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0856701db14e..48604eab7dad 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -892,7 +892,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * achieve that by always pre-iterating before returning
  * the current task:
  */
-static inline struct task_struct *
+static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
 	struct task_struct *p;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index dbe4d8cf80d6..2f26c3d73506 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -7,7 +7,7 @@
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
-static inline void update_curr_rt(struct rq *rq)
+static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;