author     Jeff Layton <jlayton@redhat.com>       2013-06-21 14:58:16 +0200
committer  Al Viro <viro@zeniv.linux.org.uk>      2013-06-29 10:57:43 +0200
commit     4e8c765d384e549f9b542ea0bd42e2aa227e1404 (patch)
tree       23600131f3c0e8fa0d84707ccb19344f14627804
parent     locks: protect most of the file_lock handling with i_lock (diff)
download   linux-4e8c765d384e549f9b542ea0bd42e2aa227e1404.tar.xz
           linux-4e8c765d384e549f9b542ea0bd42e2aa227e1404.zip
locks: avoid taking global lock if possible when waking up blocked waiters
Since we always hold the i_lock when inserting a new waiter onto the fl_block list, we can avoid taking the global lock at all if we find that it's empty when we go to wake up blocked waiters.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
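To make the reasoning above concrete, here is a minimal userspace sketch of the same pattern: the waker tests the per-object waiter list without the global lock, which is safe only because every insertion holds the per-object lock that the waker already holds. This sketch is not part of the patch; struct blocker, obj_lock, block_list, insert_waiter and wake_up_blocked are hypothetical analogues of the kernel's i_lock, fl_block list and file_lock_lock, built on pthread mutexes (compile with cc -pthread).

/*
 * Illustrative sketch (not part of the patch): skip the global lock when
 * the per-object waiter list is empty. Names are stand-ins for the
 * kernel's i_lock, fl_block and file_lock_lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	struct waiter *next, *prev;
};

struct blocker {
	pthread_mutex_t obj_lock;	/* analogue of the i_lock */
	struct waiter block_list;	/* analogue of fl_block */
};

/* analogue of the global file_lock_lock */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static bool list_empty(const struct waiter *head)
{
	return head->next == head;
}

/* Waiters are only ever added with both obj_lock and global_lock held. */
static void insert_waiter(struct blocker *b, struct waiter *w)
{
	pthread_mutex_lock(&b->obj_lock);
	pthread_mutex_lock(&global_lock);
	w->next = b->block_list.next;
	w->prev = &b->block_list;
	b->block_list.next->prev = w;
	b->block_list.next = w;
	pthread_mutex_unlock(&global_lock);
	pthread_mutex_unlock(&b->obj_lock);
}

/* Wake-up path; the caller must already hold b->obj_lock. */
static void wake_up_blocked(struct blocker *b)
{
	/*
	 * Fast path: because insertions require obj_lock (which we hold),
	 * an empty list cannot grow under us, so the global lock can be
	 * skipped entirely.
	 */
	if (list_empty(&b->block_list))
		return;

	pthread_mutex_lock(&global_lock);
	while (!list_empty(&b->block_list)) {
		struct waiter *w = b->block_list.next;

		b->block_list.next = w->next;
		w->next->prev = &b->block_list;
		/* ... wake the waiter here ... */
	}
	pthread_mutex_unlock(&global_lock);
}

int main(void)
{
	struct blocker b = {
		.obj_lock = PTHREAD_MUTEX_INITIALIZER,
		.block_list = { &b.block_list, &b.block_list },
	};
	struct waiter w;

	insert_waiter(&b, &w);

	pthread_mutex_lock(&b.obj_lock);
	wake_up_blocked(&b);
	pthread_mutex_unlock(&b.obj_lock);

	printf("waiter list empty again: %d\n", list_empty(&b.block_list));
	return 0;
}

Note that in the kernel a waiter can also remove itself from fl_block with only the global lock held, which is why the wake-up loop re-tests list_empty() after acquiring that lock; only the empty check on the fast path relies on the i_lock.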
-rw-r--r--  fs/locks.c | 15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/fs/locks.c b/fs/locks.c
index ce302d43822b..84e269fc4c69 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -548,7 +548,10 @@ static void locks_delete_block(struct file_lock *waiter)
  * the order they blocked. The documentation doesn't require this but
  * it seems like the reasonable thing to do.
  *
- * Must be called with file_lock_lock held!
+ * Must be called with both the i_lock and file_lock_lock held. The fl_block
+ * list itself is protected by the file_lock_lock, but by ensuring that the
+ * i_lock is also held on insertions we can avoid taking the file_lock_lock
+ * in some cases when we see that the fl_block list is empty.
  */
 static void __locks_insert_block(struct file_lock *blocker,
 				 struct file_lock *waiter)
@@ -576,6 +579,16 @@ static void locks_insert_block(struct file_lock *blocker,
  */
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
+	/*
+	 * Avoid taking global lock if list is empty. This is safe since new
+	 * blocked requests are only added to the list under the i_lock, and
+	 * the i_lock is always held here. Note that removal from the fl_block
+	 * list does not require the i_lock, so we must recheck list_empty()
+	 * after acquiring the file_lock_lock.
+	 */
+	if (list_empty(&blocker->fl_block))
+		return;
+
 	spin_lock(&file_lock_lock);
 	while (!list_empty(&blocker->fl_block)) {
 		struct file_lock *waiter;