author     Pavel Emelyanov <xemul@openvz.org>       2007-09-11 14:38:13 +0200
committer  J. Bruce Fields <bfields@citi.umich.edu> 2007-10-10 00:32:45 +0200
commit     84d535ade62b6f8ce852745731ad6200c46b977c (patch)
tree       7e9c17b51a86a1fdb20aff02432fb85723fa9579 /fs/locks.c
parent     locks: kill redundant local variable (diff)
download   linux-84d535ade62b6f8ce852745731ad6200c46b977c.tar.xz
           linux-84d535ade62b6f8ce852745731ad6200c46b977c.zip
Memory shortage can result in inconsistent flocks state
When flock_lock_file() is called to change a flock from F_RDLCK to
F_WRLCK or vice versa, the existing flock can be removed without any
appropriate warning. Look:

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);      <<<<<< !
		break;
	}

If, after this point, the subsequent locks_alloc_lock() fails, the
return code will be -ENOMEM, but the existing lock has already been
removed.

It is a known limitation that such "re-locking" is not atomic, but in
the racy case the file should stay locked (albeit by some other
process); here, however, the file ends up unlocked.

The proposal is to prepare the new lock in advance, leaving no chance
for the later code to fail.

Found while making flocks pid-namespace aware.

(Note: Thanks to Reuben Farrelly for finding a bug in an earlier version
of this patch.)

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Cc: Reuben Farrelly <reuben-linuxkernel@reub.net>
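For illustration only (not part of this patch): a minimal userspace sketch of
the scenario described above, in which a process upgrades a shared flock to an
exclusive one. The file name and the program itself are hypothetical; the
point is that the upgrade is expected to leave the file locked by somebody
even when the call fails.

	#include <sys/file.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/tmp/flock-demo", O_RDWR | O_CREAT, 0644);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Take a shared flock (F_RDLCK on the kernel side). */
		if (flock(fd, LOCK_SH) < 0) {
			perror("flock(LOCK_SH)");
			return 1;
		}

		/*
		 * Upgrade to an exclusive flock (F_WRLCK).  If this fails,
		 * the file should still be locked (possibly by another
		 * process); before this patch, an -ENOMEM inside
		 * flock_lock_file() could instead leave the file with no
		 * lock at all.
		 */
		if (flock(fd, LOCK_EX) < 0)
			perror("flock(LOCK_EX)");

		flock(fd, LOCK_UN);
		close(fd);
		return 0;
	}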
Diffstat (limited to 'fs/locks.c')
-rw-r--r--	fs/locks.c	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index efe1affe6bed..6e22c8129a80 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -733,6 +733,15 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	lock_kernel();
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
+
+	if (request->fl_type != F_UNLCK) {
+		error = -ENOMEM;
+		new_fl = locks_alloc_lock();
+		if (new_fl == NULL)
+			goto out;
+		error = 0;
+	}
+
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -754,10 +763,6 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		goto out;
 	}
-	error = -ENOMEM;
-	new_fl = locks_alloc_lock();
-	if (new_fl == NULL)
-		goto out;
 	/*
 	 * If a higher-priority process was blocked on the old file lock,
 	 * give it the opportunity to lock the file.