author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-03-13 12:21:26 +0100
committer  Ingo Molnar <mingo@elte.hu>                2009-04-20 20:49:53 +0200
commit     ff743345bf7685a207868048a70e23164c4785e5 (patch)
tree       7cdb917ad5ac3aa7798b0358a246ddc46a363cdc /kernel/mutex.c
parent     sched: use group_first_cpu() instead of cpumask_first(sched_group_cpus()) (diff)
sched: remove extra call overhead for schedule()
Lai Jiangshan's patch reminded me that I promised Nick to remove that
extra call overhead in schedule().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090313112300.927414207@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c  |  4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5d79781394a3..e1fb73510409 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -248,7 +248,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                /* didnt get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
-               __schedule();
+               preempt_enable_no_resched();
+               schedule();
+               preempt_disable();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
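
For context: the mutex slowpath in __mutex_lock_common() runs with preemption disabled (hence the enable/disable pair wrapped around the sleep), and before this patch it slept through the dedicated __schedule() entry point; folding that entry point back into schedule() removes one call level from every schedule() invocation, so the slowpath now open-codes the preemption bookkeeping around a plain schedule() call. Below is a minimal userspace sketch of that call ordering only; preempt_enable_no_resched(), preempt_disable() and schedule() here are hypothetical stand-in stubs, not the kernel implementations, and the preempt count is just a plain integer.

/*
 * Sketch of the new sleep sequence; all three helpers are stand-in
 * stubs used purely to show the ordering and the preempt-count
 * bookkeeping, not kernel code.
 */
#include <stdio.h>

static int preempt_count = 1;   /* the slowpath enters here preempt-disabled */

static void preempt_enable_no_resched(void)
{
        preempt_count--;        /* allow preemption again, but do not reschedule yet */
}

static void preempt_disable(void)
{
        preempt_count++;        /* back to the slowpath's preempt-disabled state */
}

static void schedule(void)
{
        printf("schedule() entered with preempt_count=%d\n", preempt_count);
}

int main(void)
{
        /* didnt get the lock, go to sleep: */
        preempt_enable_no_resched();    /* was: __schedule(), which skipped this step */
        schedule();
        preempt_disable();

        printf("after sleeping, preempt_count=%d\n", preempt_count);
        return 0;
}

In the real kernel, entering schedule() with a non-zero preempt count would be a "scheduling while atomic" bug, which is why the count is dropped (without an immediate reschedule) just before sleeping and raised again as soon as the task wakes up.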