author	Christoph Hellwig <hch@lst.de>	2020-07-24 07:45:59 +0200
committer	Darrick J. Wong <darrick.wong@oracle.com>	2020-08-05 18:24:16 +0200
commit	60263d5889e6dc5987dc51b801be4955ff2e4aa7 (patch)
tree	cb8418c747e19055b1416e993c3b00578046f520 /fs/iomap
parent	xfs: use ENOTBLK for direct I/O to buffered I/O fallback (diff)
download	linux-60263d5889e6dc5987dc51b801be4955ff2e4aa7.tar.xz
	linux-60263d5889e6dc5987dc51b801be4955ff2e4aa7.zip
iomap: fall back to buffered writes for invalidation failures
Failing to invalidate the page cache means data is incoherent, which is a
very bad state for the system. Always fall back to buffered I/O through
the page cache if we can't invalidate mappings.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Acked-by: Bob Peterson <rpeterso@redhat.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Theodore Ts'o <tytso@mit.edu> # for ext4
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com> # for gfs2
Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
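[Editor's sketch] For context, a minimal sketch of how a filesystem's ->write_iter path might consume the new -ENOTBLK contract and retry through the page cache. The helpers example_dio_write() and example_buffered_write() are hypothetical stand-ins for a filesystem's own direct and buffered write paths (the former ultimately calling iomap_dio_rw()); this mirrors the fallback pattern used by callers such as XFS and is not part of this patch.

/*
 * Hypothetical caller-side fallback, not code from this patch.
 * example_dio_write() is assumed to reach iomap_dio_rw() and therefore
 * to return -ENOTBLK when page cache invalidation fails for a write.
 */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = example_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
		/* Invalidation failed: redo the whole write through the page cache. */
	}
	return example_buffered_write(iocb, from);
}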
Diffstat (limited to 'fs/iomap')
-rw-r--r--	fs/iomap/direct-io.c	16
-rw-r--r--	fs/iomap/trace.h	1
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 190967e87b69..c1aafb2ab990 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -10,6 +10,7 @@
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
+#include "trace.h"
#include "../internal.h"
@@ -401,6 +402,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
* can be mapped into multiple disjoint IOs and only a subset of the IOs issued
* may be pure data writes. In that case, we still need to do a full data sync
* completion.
+ *
+ * Returns -ENOTBLK in case of a page invalidation failure for
+ * writes. The caller needs to fall back to buffered I/O in this case.
*/
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
@@ -478,13 +482,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == WRITE) {
/*
* Try to invalidate cache pages for the range we are writing.
- * If this invalidation fails, tough, the write will still work,
- * but racing two incompatible write paths is a pretty crazy
- * thing to do, so we don't support it 100%.
+ * If this invalidation fails, let the caller fall back to
+ * buffered I/O.
*/
if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
- end >> PAGE_SHIFT))
- dio_warn_stale_pagecache(iocb->ki_filp);
+ end >> PAGE_SHIFT)) {
+ trace_iomap_dio_invalidate_fail(inode, pos, count);
+ ret = -ENOTBLK;
+ goto out_free_dio;
+ }
if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 5693a39d52fb..fdc7ae388476 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -74,6 +74,7 @@ DEFINE_EVENT(iomap_range_class, name, \
DEFINE_RANGE_EVENT(iomap_writepage);
DEFINE_RANGE_EVENT(iomap_releasepage);
DEFINE_RANGE_EVENT(iomap_invalidatepage);
+DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
#define IOMAP_TYPE_STRINGS \
{ IOMAP_HOLE, "HOLE" }, \