author		Roel Kluin <roel.kluin@gmail.com>	2008-05-13 23:44:11 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-05-29 11:25:14 +0200
commit		3f33a7ce9567ded582af1ab71f9802165fe12f09 (patch)
tree		04304757dc84bc36400f306677d3452635cf9f74 /kernel
parent		revert ("sched: fair: weight calculations") (diff)
sched: unite unlikely pairs in rt_policy() and schedule_debug()
Removes obfuscation and may improve assembly.
Signed-off-by: Roel Kluin <roel.kluin@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	4
1 files changed, 2 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 4aac8aa16037..97017356669a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
 		return 1;
 	return 0;
 }
@@ -4433,7 +4433,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
-	if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
		__schedule_bug(prev);
 
	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
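For context, unlikely() is the kernel's branch-prediction hint built on __builtin_expect(); folding both comparisons under a single unlikely() lets one hint cover the whole combined condition instead of annotating each comparison separately. The snippet below is a minimal, standalone sketch of the two forms from the rt_policy() hunk; the unlikely() macro mirrors the kernel's definition, and the SCHED_FIFO/SCHED_RR values and the _old/_new function names are supplied here only so the example compiles outside the kernel tree.

/*
 * Standalone sketch of the before/after forms in this patch.  The
 * unlikely() macro mirrors the kernel's definition; SCHED_FIFO and
 * SCHED_RR are given their usual values purely for illustration and
 * are normally provided by kernel headers.
 */
#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

#define SCHED_FIFO	1
#define SCHED_RR	2

/* Old form: two separate branch hints, one per comparison. */
static inline int rt_policy_old(int policy)
{
	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
		return 1;
	return 0;
}

/* New form: a single hint covering the whole combined condition. */
static inline int rt_policy_new(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

int main(void)
{
	/* Both forms return the same results; only the hint placement differs. */
	printf("%d %d %d %d\n",
	       rt_policy_old(SCHED_RR), rt_policy_new(SCHED_RR),
	       rt_policy_old(0), rt_policy_new(0));
	return 0;
}

Either form yields the same results at runtime; the change is about readability and handing the compiler one coherent hint rather than two, which is why the commit message only claims it "may improve assembly".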