author | Christoph Hellwig <hch@lst.de> | 2021-12-15 09:45:08 +0100
---|---|---
committer | Dan Williams <dan.j.williams@intel.com> | 2021-12-18 17:04:53 +0100
commit | 7ac5360cd4d02cc7e0eaf10867f599e041822f12 |
tree | 995133c39f548b5ce8f01cc3a72f15d85838a630 /drivers/dax |
parent | dax: remove the DAXDEV_F_SYNC flag |
dax: remove the copy_from_iter and copy_to_iter methods
These methods add an indirect call to the actual DAX read/write path. In the
end, pmem uses the cache-flushing and machine-check-safe ("mc") copy variants,
fuse and dcssblk use the plain ones, and device mapper simply redirects to the
underlying device.
Add set_dax_nocache() and set_dax_nomc() APIs to control which copy routines
are used. This removes the indirect call from the read/write fast path as well
as a lot of boilerplate code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Vivek Goyal <vgoyal@redhat.com> [virtiofs]
Link: https://lore.kernel.org/r/20211215084508.435401-5-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
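For illustration, here is a minimal sketch (not part of this patch) of how a pmem-style driver would opt in to the new behaviour: instead of supplying its own ->copy_from_iter/->copy_to_iter methods, it sets the flags once after allocating the dax_device, mirroring the devm_create_dev_dax() hunk in the diff below. The function name, the alloc_dax() arguments, and the error handling are assumptions made for the example; only set_dax_nocache() and set_dax_nomc() are the APIs added here.

```c
#include <linux/dax.h>
#include <linux/err.h>

/*
 * Hypothetical driver attach path, sketched for illustration only.
 * The alloc_dax() arguments are assumed; the set_dax_nocache() and
 * set_dax_nomc() calls are the API added by this patch.
 */
static int example_attach_dax(void *drv_private)
{
	struct dax_device *dax_dev;

	dax_dev = alloc_dax(drv_private, NULL);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);

	/* writes must leave the CPU caches clean: _copy_from_iter_flushcache() */
	set_dax_nocache(dax_dev);
	/* reads must survive poisoned media: _copy_mc_to_iter() */
	set_dax_nomc(dax_dev);

	/*
	 * A fuse/virtiofs- or dcssblk-style device would skip both calls
	 * and keep the plain _copy_from_iter()/_copy_to_iter() paths.
	 */
	return 0;
}
```

Testing a flag bit in dax_copy_from_iter()/dax_copy_to_iter() replaces the per-device indirect call with a plain conditional, which is the fast-path simplification the commit message refers to.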
Diffstat (limited to 'drivers/dax')
-rw-r--r-- | drivers/dax/bus.c | 2
-rw-r--r-- | drivers/dax/super.c | 36
2 files changed, 34 insertions, 4 deletions
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index da2a14d096d2..ee4568ef757c 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -1330,6 +1330,8 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
 		goto err_alloc_dax;
 	}
 	set_dax_synchronous(dax_dev);
+	set_dax_nocache(dax_dev);
+	set_dax_nomc(dax_dev);
 
 	/* a device_dax instance is dead while the driver is not attached */
 	kill_dax(dax_dev);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e81d5ee57390..e3029389d809 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -105,6 +105,10 @@ enum dax_device_flags {
 	DAXDEV_WRITE_CACHE,
 	/* flag to check if device supports synchronous flush */
 	DAXDEV_SYNC,
+	/* do not leave the caches dirty after writes */
+	DAXDEV_NOCACHE,
+	/* handle CPU fetch exceptions during reads */
+	DAXDEV_NOMC,
 };
 
 /**
@@ -146,9 +150,15 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 	if (!dax_alive(dax_dev))
 		return 0;
 
-	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+	/*
+	 * The userspace address for the memory copy has already been validated
+	 * via access_ok() in vfs_write, so use the 'no check' version to bypass
+	 * the HARDENED_USERCOPY overhead.
+	 */
+	if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
+		return _copy_from_iter_flushcache(addr, bytes, i);
+	return _copy_from_iter(addr, bytes, i);
 }
-EXPORT_SYMBOL_GPL(dax_copy_from_iter);
 
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i)
@@ -156,9 +166,15 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 	if (!dax_alive(dax_dev))
 		return 0;
 
-	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+	/*
+	 * The userspace address for the memory copy has already been validated
+	 * via access_ok() in vfs_read, so use the 'no check' version to bypass
+	 * the HARDENED_USERCOPY overhead.
+	 */
+	if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
+		return _copy_mc_to_iter(addr, bytes, i);
+	return _copy_to_iter(addr, bytes, i);
 }
-EXPORT_SYMBOL_GPL(dax_copy_to_iter);
 
 int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 			size_t nr_pages)
@@ -220,6 +236,18 @@ void set_dax_synchronous(struct dax_device *dax_dev)
 }
 EXPORT_SYMBOL_GPL(set_dax_synchronous);
 
+void set_dax_nocache(struct dax_device *dax_dev)
+{
+	set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nocache);
+
+void set_dax_nomc(struct dax_device *dax_dev)
+{
+	set_bit(DAXDEV_NOMC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nomc);
+
 bool dax_alive(struct dax_device *dax_dev)
 {
 	lockdep_assert_held(&dax_srcu);