author     Jane Chu <jane.chu@oracle.com>            2023-06-15 20:13:25 +0200
committer  Vishal Verma <vishal.l.verma@intel.com>   2023-06-26 15:54:23 +0200
commit     1ea7ca1b090145519aad998679222f0a14ab8fce (patch)
tree       fb2db2940309adf5d9a477fdfe742af55ad710c0 /fs/dax.c
parent     Merge branch 'for-6.5/dax-cleanups' into nvdimm-for-next (diff)
dax: enable dax fault handler to report VM_FAULT_HWPOISON
When multiple processes mmap() a dax file and, at some point, one process issues a 'load' and consumes a hwpoison, that process receives a SIGBUS with si_code = BUS_MCEERR_AR and with si_lsb set for the poison scope. Soon after, when any other process issues a 'load' to the poisoned page (which has been unmapped on the kernel side by memory_failure()), it receives a SIGBUS with si_code = BUS_ADRERR and without a valid si_lsb. This is confusing to the user, differs from a page fault due to poison in RAM, and loses some helpful information.

Channel the dax backend driver's poison detection to the filesystem so that the fault handler can report VM_FAULT_HWPOISON instead of VM_FAULT_SIGBUS.

If user-level block IO syscalls fail due to poison, the errno is converted to EIO to maintain block API consistency.

Signed-off-by: Jane Chu <jane.chu@oracle.com>
Link: https://lore.kernel.org/r/20230615181325.1327259-2-jane.chu@oracle.com
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
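The dax_mem2blk_err() helper used in the hunks below is introduced by the wider patch outside fs/dax.c (in include/linux/dax.h) and is therefore not part of the diffstat shown here. A minimal sketch of the errno mapping described above, assuming the helper only translates the memory-failure errno for block IO paths and passes everything else through, would look roughly like:

/*
 * Rough sketch, not the literal include/linux/dax.h change: block IO
 * syscalls must keep seeing -EIO, so the -EHWPOISON returned by
 * dax_direct_access() on poisoned pmem is translated before it reaches
 * the block/filesystem layer; all other errors pass through unchanged.
 */
static inline int dax_mem2blk_err(int err)
{
	return (err == -EHWPOISON) ? -EIO : err;
}

The fault path, by contrast, deliberately keeps -EHWPOISON so that it can surface as VM_FAULT_HWPOISON (see the sketch after the diff).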
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index cb36c6746fc4..906ecbd541a3 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1148,7 +1148,7 @@ static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
 	if (!zero_edge) {
 		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
 		if (ret)
-			return ret;
+			return dax_mem2blk_err(ret);
 	}
 
 	if (copy_all) {
@@ -1310,7 +1310,7 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
 
 out_unlock:
 	dax_read_unlock(id);
-	return ret;
+	return dax_mem2blk_err(ret);
 }
 
 int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
@@ -1342,7 +1342,8 @@ static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
 	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
 			NULL);
 	if (ret < 0)
-		return ret;
+		return dax_mem2blk_err(ret);
+
 	memset(kaddr + offset, 0, size);
 	if (iomap->flags & IOMAP_F_SHARED)
 		ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
@@ -1498,7 +1499,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 
 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
 				DAX_ACCESS, &kaddr, NULL);
-		if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
+		if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
 			map_len = dax_direct_access(dax_dev, pgoff,
 					PHYS_PFN(size), DAX_RECOVERY_WRITE,
 					&kaddr, NULL);
@@ -1506,7 +1507,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 			recovery = true;
 		}
 		if (map_len < 0) {
-			ret = map_len;
+			ret = dax_mem2blk_err(map_len);
 			break;
 		}