author		Linus Torvalds <torvalds@linux-foundation.org>	2008-05-12 01:04:48 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-12 01:04:48 +0200
commit		c3921ab71507b108d51a0f1ee960f80cd668a93d (patch)
tree		b1408b898a8b50f15ad4a0cf1f29e17cc0138485 /kernel/sched.c
parent		usb-serial: Add ThinkOptics WavIT (diff)
download	linux-c3921ab71507b108d51a0f1ee960f80cd668a93d.tar.xz
		linux-c3921ab71507b108d51a0f1ee960f80cd668a93d.zip
Add new 'cond_resched_bkl()' helper function
It acts exactly like a regular 'cond_resched()', but will not get
optimized away when CONFIG_PREEMPT is set.

Normal kernel code is already preemptible in the presence of
CONFIG_PREEMPT, so cond_resched() is optimized away (see commit
02b67cc3ba36bdba351d6c3a00593f4ec550d9d3 "sched: do not do
cond_resched() when CONFIG_PREEMPT").

But when wanting to conditionally reschedule while holding a lock, you
need to use "cond_resched_lock(lock)", and this new function is the BKL
equivalent of that.

Also make fs/locks.c use it.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
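As a rough illustration of the intended use (the function below is a
hypothetical caller for this note, not the actual fs/locks.c hunk from
this commit): code that does a long walk while holding the Big Kernel
Lock can yield explicitly with cond_resched_bkl(), which, unlike a plain
cond_resched(), is not compiled away on CONFIG_PREEMPT kernels.

	#include <linux/sched.h>	/* cond_resched_bkl() */
	#include <linux/smp_lock.h>	/* lock_kernel(), unlock_kernel() */

	/*
	 * Hypothetical caller, for illustration only: walk many entries
	 * while holding the BKL, yielding the CPU between iterations.
	 */
	static void scan_entries_under_bkl(int nr_entries)
	{
		int i;

		lock_kernel();
		for (i = 0; i < nr_entries; i++) {
			/* ... examine or update entry i (details omitted) ... */

			/*
			 * Behaves like cond_resched(), but is not optimized
			 * away when CONFIG_PREEMPT is set, so BKL-holding
			 * code can still reschedule voluntarily here.
			 */
			cond_resched_bkl();
		}
		unlock_kernel();
	}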
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	2 --
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c51b6565e07c..8841a915545d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5525,7 +5525,6 @@ static void __cond_resched(void)
 	} while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
 	if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5536,7 +5535,6 @@ int __sched _cond_resched(void)
 	return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
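The hunk above drops the #if/#endif guard so that _cond_resched() is
always compiled and exported, even when CONFIG_PREEMPT is set. The
header side of the change (in include/linux/sched.h, not shown in this
kernel/sched.c-limited view) then only needs to route the new helper to
it. A plausible sketch of that wiring follows; the #ifdef structure and
inline definitions are assumptions, not the verbatim hunk, while
_cond_resched() and cond_resched_bkl() are the names from this commit.

	/* Sketch only -- the real include/linux/sched.h hunk is not shown here. */
	extern int _cond_resched(void);		/* now built unconditionally */

	#ifdef CONFIG_PREEMPT
	/* Plain cond_resched() stays a no-op: the kernel is already preemptible. */
	static inline int cond_resched(void)
	{
		return 0;
	}
	#else
	static inline int cond_resched(void)
	{
		return _cond_resched();
	}
	#endif

	/* BKL equivalent of cond_resched_lock(): never optimized away. */
	static inline int cond_resched_bkl(void)
	{
		return _cond_resched();
	}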