author	Dave Jiang <dave.jiang@intel.com>	2018-08-10 17:48:18 +0200
committer	Darrick J. Wong <darrick.wong@oracle.com>	2018-08-12 17:37:31 +0200
commit	e25ff835af89a80aa6a4de58f413e494b2b96bd1 (patch)
tree	0fc0f6bd0cc21537f79509bba4761bb0f74696b3 /fs/xfs
parent	xfs: repair the AGI (diff)
xfs: Close race between direct IO and xfs_break_layouts()
This patch is the xfs counterpart of Ross's fix for ext4.

If the refcount of a page is lowered between the time it is returned by dax_layout_busy_page() and when the refcount is checked again in xfs_break_layouts() => ___wait_var_event(), the waiting function xfs_wait_dax_page() will never be called. This means that xfs_break_layouts() will still have 'retry' set to false, so we stop looping and never check the refcount of other pages in this inode.

Instead, always continue looping as long as dax_layout_busy_page() gives us a page which it found with an elevated refcount.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
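To make the retry logic concrete, here is a small standalone userspace sketch of the pattern the changelog describes (hypothetical names such as fake_page, find_busy_page and break_dax_layouts; an illustration of the reasoning, not the kernel code): the retry flag is raised as soon as a busy page is found, before any waiting, so the caller keeps looping even if the page's refcount drops between the lookup and the wait-condition check, and every busy page in the inode eventually gets visited.

/*
 * Standalone sketch (hypothetical names, not kernel code) of the retry
 * pattern described above.  break_dax_layouts() sets *retry the moment
 * it finds a busy page -- before waiting -- and the caller loops until
 * a pass finds no busy page at all.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	int refcount;			/* stand-in for page->_refcount */
};

/* stand-in for dax_layout_busy_page(): NULL when no page is busy */
static struct fake_page *find_busy_page(struct fake_page *pages, int n)
{
	for (int i = 0; i < n; i++)
		if (pages[i].refcount > 1)
			return &pages[i];
	return NULL;
}

/* analogue of xfs_break_dax_layouts() after the fix */
static int break_dax_layouts(struct fake_page *pages, int n, bool *retry)
{
	struct fake_page *page = find_busy_page(pages, n);

	if (!page)
		return 0;

	*retry = true;			/* raised unconditionally, before the wait */
	page->refcount = 1;		/* stand-in for waiting until the refcount drops */
	return 0;
}

int main(void)
{
	struct fake_page pages[] = { { 2 }, { 1 }, { 3 } };
	bool retry;
	int error;

	/* caller loop: keep going as long as a pass reported a busy page */
	do {
		retry = false;
		error = break_dax_layouts(pages, 3, &retry);
	} while (error == 0 && retry);

	printf("all pages idle, error=%d\n", error);
	return error;
}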
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_file.c	9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 6b31f41eafa2..181e9084519b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -721,12 +721,10 @@ xfs_file_write_iter(
 static void
 xfs_wait_dax_page(
-	struct inode		*inode,
-	bool			*did_unlock)
+	struct inode		*inode)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
-	*did_unlock = true;
 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	schedule();
 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
@@ -735,7 +733,7 @@ xfs_wait_dax_page(
 static int
 xfs_break_dax_layouts(
 	struct inode		*inode,
-	bool			*did_unlock)
+	bool			*retry)
 {
 	struct page		*page;
@@ -745,9 +743,10 @@ xfs_break_dax_layouts(
 	if (!page)
 		return 0;
+	*retry = true;
 	return ___wait_var_event(&page->_refcount,
 		atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
-		0, 0, xfs_wait_dax_page(inode, did_unlock));
+		0, 0, xfs_wait_dax_page(inode));
 }
 int