author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-01-13 00:46:11 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-01-13 00:46:11 +0100
commit | 3acbdbf42e943d85174401357a6b6243479d4c76 (patch)
tree | 94d092eedc0e24f611a14a4fcceb9d3643b7ac25 /drivers
parent | Merge tag 'fscache-rewrite-20220111' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent | iomap: Fix error handling in iomap_zero_iter() (diff)
Merge tag 'libnvdimm-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull dax and libnvdimm updates from Dan Williams:
"The bulk of this is a rework of the dax_operations API after
discovering the obstacles it posed to the work-in-progress DAX+reflink
support for XFS and other copy-on-write filesystem mechanics.
Primarily the need to plumb a block_device through the API to handle
partition offsets was a sticking point and Christoph untangled that
dependency in addition to other cleanups to make landing the
DAX+reflink support easier.
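
A hypothetical, condensed sketch of the consumer side of this rework (the `example_*` names and struct are invented for illustration, not code from the series): fs_dax_get_by_bdev() now reports the partition's byte offset once, at lookup time, and the filesystem folds that offset into every DAX page offset itself instead of translating through bdev_dax_pgoff() on each access.

```c
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/pfn.h>

struct example_dax_mount {		/* invented, stands in for per-superblock state */
	struct dax_device *dax_dev;
	u64 dax_part_off;		/* byte offset of this partition on the dax_device */
};

static int example_setup_dax(struct example_dax_mount *m, struct block_device *bdev)
{
	/* One-time lookup; also rejects partitions that are not page aligned. */
	m->dax_dev = fs_dax_get_by_bdev(bdev, &m->dax_part_off);
	if (!m->dax_dev)
		return -EOPNOTSUPP;
	return 0;
}

/* Per-operation translation: partition-relative byte position -> dax pgoff. */
static pgoff_t example_dax_pgoff(struct example_dax_mount *m, loff_t pos)
{
	return PHYS_PFN(m->dax_part_off + pos);
}
```

This is roughly the shape the ext2, ext4, and xfs conversions in this series take, and it is what lets bdev_dax_pgoff() and the @bdev plumbing drop out of the DAX core.
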
The DEV_DAX_PMEM_COMPAT option has been around for 4 years; not only
are distributions shipping userspace that understands the current
configuration API, but some are not even bothering to turn the option
on anymore, so it seems a good time to remove it per the deprecation
schedule. Recall that it was added after the device-dax subsystem
moved from /sys/class/dax to /sys/bus/dax for its sysfs organization.
All recent functionality depends on /sys/bus/dax.
Some other miscellaneous cleanups and reflink prep patches are
included as well.
Summary:
- Simplify the dax_operations API:
  - Eliminate bdev_dax_pgoff() in favor of the filesystem maintaining
    and applying a partition offset to all its DAX iomap operations.
  - Remove wrappers and device-mapper stacked callbacks for
    ->copy_from_iter() and ->copy_to_iter() in favor of moving
    block_device-relative offset responsibility to the
    dax_direct_access() caller.
  - Remove the need for an @bdev in filesystem-DAX infrastructure.
  - Remove the unused uio helpers copy_from_iter_flushcache() and
    copy_mc_to_iter(), as only the non-check_copy_size() versions are
    used for DAX.
- Prepare XFS for the pending (next merge window) DAX+reflink support.
- Remove deprecated DEV_DAX_PMEM_COMPAT support.
- Clean up a straggling misuse of the GUID API"
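
On the provider side, the conversions in this pull (nvdimm/pmem, device-mapper, dcssblk) converge on a common registration pattern: alloc_dax() loses its host-name and flags arguments, the per-driver copy_{from,to}_iter callbacks are replaced by common DAX-core copy routines selected with set_dax_nocache()/set_dax_nomc(), DAXDEV_F_SYNC becomes set_dax_synchronous(), and dax_add_host() ties the dax_device to a gendisk so fs_dax_get_by_bdev() can find it. The sketch below is a hedged composite with invented `example_*` names, not code from any one driver.

```c
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/pfn_t.h>
#include <linux/err.h>

static long example_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	return -EOPNOTSUPP;	/* a real driver hands back its mapped pages here */
}

static int example_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages)
{
	return -EOPNOTSUPP;	/* a real driver zeroes the backing memory here */
}

/* Only direct_access and zero_page_range remain in dax_operations. */
static const struct dax_operations example_dax_ops = {
	.direct_access   = example_dax_direct_access,
	.zero_page_range = example_dax_zero_page_range,
};

static int example_register_dax(void *drvdata, struct gendisk *disk, bool sync)
{
	struct dax_device *dax_dev;
	int rc;

	dax_dev = alloc_dax(drvdata, &example_dax_ops);	/* no host name, no flags */
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);

	/* Former flags/callbacks become explicit property setters. */
	set_dax_nocache(dax_dev);	/* writes use the cache-flushing copy helper */
	set_dax_nomc(dax_dev);		/* reads use the machine-check-safe copy helper */
	if (sync)
		set_dax_synchronous(dax_dev);	/* replaces DAXDEV_F_SYNC */

	rc = dax_add_host(dax_dev, disk);	/* lets fs_dax_get_by_bdev() find it */
	if (rc) {
		kill_dax(dax_dev);
		put_dax(dax_dev);
	}
	return rc;
}
```

dcssblk, for instance, skips the nocache/nomc setters and only marks itself synchronous, but follows the same alloc_dax() plus dax_add_host() sequence.
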
* tag 'libnvdimm-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (38 commits)
iomap: Fix error handling in iomap_zero_iter()
ACPI: NFIT: Import GUID before use
dax: remove the copy_from_iter and copy_to_iter methods
dax: remove the DAXDEV_F_SYNC flag
dax: simplify dax_synchronous and set_dax_synchronous
uio: remove copy_from_iter_flushcache() and copy_mc_to_iter()
iomap: turn the byte variable in iomap_zero_iter into a ssize_t
memremap: remove support for external pgmap refcounts
fsdax: don't require CONFIG_BLOCK
iomap: build the block based code conditionally
dax: fix up some of the block device related ifdefs
fsdax: shift partition offset handling into the file systems
dax: return the partition offset from fs_dax_get_by_bdev
iomap: add a IOMAP_DAX flag
xfs: pass the mapping flags to xfs_bmbt_to_iomap
xfs: use xfs_direct_write_iomap_ops for DAX zeroing
xfs: move dax device handling into xfs_{alloc,free}_buftarg
ext4: cleanup the dax handling in ext4_fill_super
ext2: cleanup the dax handling in ext2_fill_super
fsdax: decouple zeroing from the iomap buffered I/O code
...
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/acpi/nfit/core.c | 4
-rw-r--r-- | drivers/dax/Kconfig | 13
-rw-r--r-- | drivers/dax/Makefile | 3
-rw-r--r-- | drivers/dax/bus.c | 30
-rw-r--r-- | drivers/dax/bus.h | 13
-rw-r--r-- | drivers/dax/device.c | 6
-rw-r--r-- | drivers/dax/pmem.c (renamed from drivers/dax/pmem/core.c) | 36
-rw-r--r-- | drivers/dax/pmem/Makefile | 1
-rw-r--r-- | drivers/dax/pmem/compat.c | 72
-rw-r--r-- | drivers/dax/pmem/pmem.c | 30
-rw-r--r-- | drivers/dax/super.c | 272
-rw-r--r-- | drivers/md/dm-linear.c | 63
-rw-r--r-- | drivers/md/dm-log-writes.c | 110
-rw-r--r-- | drivers/md/dm-stripe.c | 75
-rw-r--r-- | drivers/md/dm-table.c | 22
-rw-r--r-- | drivers/md/dm-writecache.c | 2
-rw-r--r-- | drivers/md/dm.c | 89
-rw-r--r-- | drivers/md/dm.h | 4
-rw-r--r-- | drivers/nvdimm/Kconfig | 2
-rw-r--r-- | drivers/nvdimm/pmem.c | 38
-rw-r--r-- | drivers/pci/p2pdma.c | 2
-rw-r--r-- | drivers/s390/block/Kconfig | 2
-rw-r--r-- | drivers/s390/block/dcssblk.c | 26
23 files changed, 192 insertions, 723 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7dd80acf92c7..e5d7f2bda13f 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -678,10 +678,12 @@ static const char *spa_type_name(u16 type) int nfit_spa_type(struct acpi_nfit_system_address *spa) { + guid_t guid; int i; + import_guid(&guid, spa->range_guid); for (i = 0; i < NFIT_UUID_MAX; i++) - if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid)) + if (guid_equal(to_nfit_uuid(i), &guid)) return i; return -1; } diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index d2834c2cfa10..5fdf269a822e 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -1,8 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -config DAX_DRIVER - select DAX - bool - menuconfig DAX tristate "DAX: direct access to differentiated memory" select SRCU @@ -70,13 +66,4 @@ config DEV_DAX_KMEM Say N if unsure. -config DEV_DAX_PMEM_COMPAT - tristate "PMEM DAX: support the deprecated /sys/class/dax interface" - depends on m && DEV_DAX_PMEM=m - default DEV_DAX_PMEM - help - Older versions of the libdaxctl library expect to find all - device-dax instances under /sys/class/dax. If libdaxctl in - your distribution is older than v58 say M, otherwise say N. - endif diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile index 9d4ba672d305..90a56ca3b345 100644 --- a/drivers/dax/Makefile +++ b/drivers/dax/Makefile @@ -2,10 +2,11 @@ obj-$(CONFIG_DAX) += dax.o obj-$(CONFIG_DEV_DAX) += device_dax.o obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o +obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o dax-y := super.o dax-y += bus.o device_dax-y := device.o +dax_pmem-y := pmem.o -obj-y += pmem/ obj-y += hmem/ diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 6cc4da4c713d..ee4568ef757c 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -10,8 +10,6 @@ #include "dax-private.h" #include "bus.h" -static struct class *dax_class; - static DEFINE_MUTEX(dax_bus_lock); #define DAX_NAME_LEN 30 @@ -1323,14 +1321,17 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) } /* - * No 'host' or dax_operations since there is no access to this - * device outside of mmap of the resulting character device. + * No dax_operations since there is no access to this device outside of + * mmap of the resulting character device. 
*/ - dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC); + dax_dev = alloc_dax(dev_dax, NULL); if (IS_ERR(dax_dev)) { rc = PTR_ERR(dax_dev); goto err_alloc_dax; } + set_dax_synchronous(dax_dev); + set_dax_nocache(dax_dev); + set_dax_nomc(dax_dev); /* a device_dax instance is dead while the driver is not attached */ kill_dax(dax_dev); @@ -1343,10 +1344,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) inode = dax_inode(dax_dev); dev->devt = inode->i_rdev; - if (data->subsys == DEV_DAX_BUS) - dev->bus = &dax_bus_type; - else - dev->class = dax_class; + dev->bus = &dax_bus_type; dev->parent = parent; dev->type = &dev_dax_type; @@ -1445,22 +1443,10 @@ EXPORT_SYMBOL_GPL(dax_driver_unregister); int __init dax_bus_init(void) { - int rc; - - if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) { - dax_class = class_create(THIS_MODULE, "dax"); - if (IS_ERR(dax_class)) - return PTR_ERR(dax_class); - } - - rc = bus_register(&dax_bus_type); - if (rc) - class_destroy(dax_class); - return rc; + return bus_register(&dax_bus_type); } void __exit dax_bus_exit(void) { bus_unregister(&dax_bus_type); - class_destroy(dax_class); } diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h index 1e946ad7780a..381cec9ff05c 100644 --- a/drivers/dax/bus.h +++ b/drivers/dax/bus.h @@ -16,24 +16,15 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct range *range, int target_node, unsigned int align, unsigned long flags); -enum dev_dax_subsys { - DEV_DAX_BUS = 0, /* zeroed dev_dax_data picks this by default */ - DEV_DAX_CLASS, -}; - struct dev_dax_data { struct dax_region *dax_region; struct dev_pagemap *pgmap; - enum dev_dax_subsys subsys; resource_size_t size; int id; }; struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data); -/* to be deleted when DEV_DAX_CLASS is removed */ -struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys); - struct dax_device_driver { struct device_driver drv; struct list_head ids; @@ -49,10 +40,6 @@ int __dax_driver_register(struct dax_device_driver *dax_drv, void dax_driver_unregister(struct dax_device_driver *dax_drv); void kill_dev_dax(struct dev_dax *dev_dax); -#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT) -int dev_dax_probe(struct dev_dax *dev_dax); -#endif - /* * While run_dax() is potentially a generic operation that could be * defined in include/linux/dax.h we don't want to grow any users diff --git a/drivers/dax/device.c b/drivers/dax/device.c index dd8222a42808..e58d597f0415 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -433,11 +433,7 @@ int dev_dax_probe(struct dev_dax *dev_dax) inode = dax_inode(dax_dev); cdev = inode->i_cdev; cdev_init(cdev, &dax_fops); - if (dev->class) { - /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */ - cdev->owner = dev->parent->driver->owner; - } else - cdev->owner = dev->driver->owner; + cdev->owner = dev->driver->owner; cdev_set_parent(cdev, &dev->kobj); rc = cdev_add(cdev, dev->devt, 1); if (rc) diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem.c index 062e8bc14223..f050ea78bb83 100644 --- a/drivers/dax/pmem/core.c +++ b/drivers/dax/pmem.c @@ -3,11 +3,11 @@ #include <linux/memremap.h> #include <linux/module.h> #include <linux/pfn_t.h> -#include "../../nvdimm/pfn.h" -#include "../../nvdimm/nd.h" -#include "../bus.h" +#include "../nvdimm/pfn.h" +#include "../nvdimm/nd.h" +#include "bus.h" -struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys) +static struct dev_dax *__dax_pmem_probe(struct device *dev) { struct range range; int rc, id, 
region_id; @@ -63,7 +63,6 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys) .dax_region = dax_region, .id = id, .pgmap = &pgmap, - .subsys = subsys, .size = range_len(&range), }; dev_dax = devm_create_dev_dax(&data); @@ -73,7 +72,32 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys) return dev_dax; } -EXPORT_SYMBOL_GPL(__dax_pmem_probe); + +static int dax_pmem_probe(struct device *dev) +{ + return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev)); +} + +static struct nd_device_driver dax_pmem_driver = { + .probe = dax_pmem_probe, + .drv = { + .name = "dax_pmem", + }, + .type = ND_DRIVER_DAX_PMEM, +}; + +static int __init dax_pmem_init(void) +{ + return nd_driver_register(&dax_pmem_driver); +} +module_init(dax_pmem_init); + +static void __exit dax_pmem_exit(void) +{ + driver_unregister(&dax_pmem_driver.drv); +} +module_exit(dax_pmem_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); +MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile index 010269f61d41..191c31f0d4f0 100644 --- a/drivers/dax/pmem/Makefile +++ b/drivers/dax/pmem/Makefile @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o -obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o dax_pmem-y := pmem.o dax_pmem_core-y := core.o diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c deleted file mode 100644 index d81dc35fd65d..000000000000 --- a/drivers/dax/pmem/compat.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */ -#include <linux/percpu-refcount.h> -#include <linux/memremap.h> -#include <linux/module.h> -#include <linux/pfn_t.h> -#include <linux/nd.h> -#include "../bus.h" - -/* we need the private definitions to implement compat suport */ -#include "../dax-private.h" - -static int dax_pmem_compat_probe(struct device *dev) -{ - struct dev_dax *dev_dax = __dax_pmem_probe(dev, DEV_DAX_CLASS); - int rc; - - if (IS_ERR(dev_dax)) - return PTR_ERR(dev_dax); - - if (!devres_open_group(&dev_dax->dev, dev_dax, GFP_KERNEL)) - return -ENOMEM; - - device_lock(&dev_dax->dev); - rc = dev_dax_probe(dev_dax); - device_unlock(&dev_dax->dev); - - devres_close_group(&dev_dax->dev, dev_dax); - if (rc) - devres_release_group(&dev_dax->dev, dev_dax); - - return rc; -} - -static int dax_pmem_compat_release(struct device *dev, void *data) -{ - device_lock(dev); - devres_release_group(dev, to_dev_dax(dev)); - device_unlock(dev); - - return 0; -} - -static void dax_pmem_compat_remove(struct device *dev) -{ - device_for_each_child(dev, NULL, dax_pmem_compat_release); -} - -static struct nd_device_driver dax_pmem_compat_driver = { - .probe = dax_pmem_compat_probe, - .remove = dax_pmem_compat_remove, - .drv = { - .name = "dax_pmem_compat", - }, - .type = ND_DRIVER_DAX_PMEM, -}; - -static int __init dax_pmem_compat_init(void) -{ - return nd_driver_register(&dax_pmem_compat_driver); -} -module_init(dax_pmem_compat_init); - -static void __exit dax_pmem_compat_exit(void) -{ - driver_unregister(&dax_pmem_compat_driver.drv); -} -module_exit(dax_pmem_compat_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Intel Corporation"); -MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c index 0ae4238a0ef8..dfe91a2990fe 100644 --- a/drivers/dax/pmem/pmem.c +++ b/drivers/dax/pmem/pmem.c @@ 
-7,34 +7,4 @@ #include <linux/nd.h> #include "../bus.h" -static int dax_pmem_probe(struct device *dev) -{ - return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev, DEV_DAX_BUS)); -} -static struct nd_device_driver dax_pmem_driver = { - .probe = dax_pmem_probe, - .drv = { - .name = "dax_pmem", - }, - .type = ND_DRIVER_DAX_PMEM, -}; - -static int __init dax_pmem_init(void) -{ - return nd_driver_register(&dax_pmem_driver); -} -module_init(dax_pmem_init); - -static void __exit dax_pmem_exit(void) -{ - driver_unregister(&dax_pmem_driver.drv); -} -module_exit(dax_pmem_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Intel Corporation"); -#if !IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT) -/* For compat builds, don't load this module by default */ -MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM); -#endif diff --git a/drivers/dax/super.c b/drivers/dax/super.c index b882cf8106ea..e3029389d809 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -7,10 +7,8 @@ #include <linux/mount.h> #include <linux/pseudo_fs.h> #include <linux/magic.h> -#include <linux/genhd.h> #include <linux/pfn_t.h> #include <linux/cdev.h> -#include <linux/hash.h> #include <linux/slab.h> #include <linux/uio.h> #include <linux/dax.h> @@ -21,15 +19,12 @@ * struct dax_device - anchor object for dax services * @inode: core vfs * @cdev: optional character interface for "device dax" - * @host: optional name for lookups where the device path is not available * @private: dax driver private data * @flags: state and boolean properties */ struct dax_device { - struct hlist_node list; struct inode inode; struct cdev cdev; - const char *host; void *private; unsigned long flags; const struct dax_operations *ops; @@ -42,10 +37,6 @@ static DEFINE_IDA(dax_minor_ida); static struct kmem_cache *dax_cache __read_mostly; static struct super_block *dax_superblock __read_mostly; -#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head)) -static struct hlist_head dax_host_list[DAX_HASH_SIZE]; -static DEFINE_SPINLOCK(dax_host_lock); - int dax_read_lock(void) { return srcu_read_lock(&dax_srcu); @@ -58,169 +49,54 @@ void dax_read_unlock(int id) } EXPORT_SYMBOL_GPL(dax_read_unlock); -static int dax_host_hash(const char *host) -{ - return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE; -} - -#ifdef CONFIG_BLOCK +#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX) #include <linux/blkdev.h> -int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, - pgoff_t *pgoff) +static DEFINE_XARRAY(dax_hosts); + +int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk) { - sector_t start_sect = bdev ? 
get_start_sect(bdev) : 0; - phys_addr_t phys_off = (start_sect + sector) * 512; + return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL); +} +EXPORT_SYMBOL_GPL(dax_add_host); - if (pgoff) - *pgoff = PHYS_PFN(phys_off); - if (phys_off % PAGE_SIZE || size % PAGE_SIZE) - return -EINVAL; - return 0; +void dax_remove_host(struct gendisk *disk) +{ + xa_erase(&dax_hosts, (unsigned long)disk); } -EXPORT_SYMBOL(bdev_dax_pgoff); +EXPORT_SYMBOL_GPL(dax_remove_host); -#if IS_ENABLED(CONFIG_FS_DAX) /** - * dax_get_by_host() - temporary lookup mechanism for filesystem-dax - * @host: alternate name for the device registered by a dax driver + * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax + * @bdev: block device to find a dax_device for + * @start_off: returns the byte offset into the dax_device that @bdev starts */ -static struct dax_device *dax_get_by_host(const char *host) +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off) { - struct dax_device *dax_dev, *found = NULL; - int hash, id; - - if (!host) - return NULL; - - hash = dax_host_hash(host); - - id = dax_read_lock(); - spin_lock(&dax_host_lock); - hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) { - if (!dax_alive(dax_dev) - || strcmp(host, dax_dev->host) != 0) - continue; - - if (igrab(&dax_dev->inode)) - found = dax_dev; - break; - } - spin_unlock(&dax_host_lock); - dax_read_unlock(id); - - return found; -} + struct dax_device *dax_dev; + u64 part_size; + int id; -struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) -{ if (!blk_queue_dax(bdev->bd_disk->queue)) return NULL; - return dax_get_by_host(bdev->bd_disk->disk_name); -} -EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); - -bool generic_fsdax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t sectors) -{ - bool dax_enabled = false; - pgoff_t pgoff, pgoff_end; - void *kaddr, *end_kaddr; - pfn_t pfn, end_pfn; - sector_t last_page; - long len, len2; - int err, id; - - if (blocksize != PAGE_SIZE) { - pr_info("%pg: error: unsupported blocksize for dax\n", bdev); - return false; - } - - if (!dax_dev) { - pr_debug("%pg: error: dax unsupported by block device\n", bdev); - return false; - } - err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff); - if (err) { + *start_off = get_start_sect(bdev) * SECTOR_SIZE; + part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE; + if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) { pr_info("%pg: error: unaligned partition for dax\n", bdev); - return false; - } - - last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512; - err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end); - if (err) { - pr_info("%pg: error: unaligned partition for dax\n", bdev); - return false; + return NULL; } id = dax_read_lock(); - len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); - len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn); - - if (len < 1 || len2 < 1) { - pr_info("%pg: error: dax access failed (%ld)\n", - bdev, len < 1 ? len : len2); - dax_read_unlock(id); - return false; - } - - if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) { - /* - * An arch that has enabled the pmem api should also - * have its drivers support pfn_t_devmap() - * - * This is a developer warning and should not trigger in - * production. dax_flush() will crash since it depends - * on being able to do (page_address(pfn_to_page())). 
- */ - WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)); - dax_enabled = true; - } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) { - struct dev_pagemap *pgmap, *end_pgmap; - - pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL); - end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL); - if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX - && pfn_t_to_page(pfn)->pgmap == pgmap - && pfn_t_to_page(end_pfn)->pgmap == pgmap - && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr)) - && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr))) - dax_enabled = true; - put_dev_pagemap(pgmap); - put_dev_pagemap(end_pgmap); - - } + dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk); + if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode)) + dax_dev = NULL; dax_read_unlock(id); - if (!dax_enabled) { - pr_info("%pg: error: dax support not enabled\n", bdev); - return false; - } - return true; -} -EXPORT_SYMBOL_GPL(generic_fsdax_supported); - -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len) -{ - bool ret = false; - int id; - - if (!dax_dev) - return false; - - id = dax_read_lock(); - if (dax_alive(dax_dev) && dax_dev->ops->dax_supported) - ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, - start, len); - dax_read_unlock(id); - return ret; + return dax_dev; } -EXPORT_SYMBOL_GPL(dax_supported); -#endif /* CONFIG_FS_DAX */ -#endif /* CONFIG_BLOCK */ +EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); +#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */ enum dax_device_flags { /* !alive + rcu grace period == no new operations / mappings */ @@ -229,6 +105,10 @@ enum dax_device_flags { DAXDEV_WRITE_CACHE, /* flag to check if device supports synchronous flush */ DAXDEV_SYNC, + /* do not leave the caches dirty after writes */ + DAXDEV_NOCACHE, + /* handle CPU fetch exceptions during reads */ + DAXDEV_NOMC, }; /** @@ -270,9 +150,15 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, if (!dax_alive(dax_dev)) return 0; - return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i); + /* + * The userspace address for the memory copy has already been validated + * via access_ok() in vfs_write, so use the 'no check' version to bypass + * the HARDENED_USERCOPY overhead. + */ + if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags)) + return _copy_from_iter_flushcache(addr, bytes, i); + return _copy_from_iter(addr, bytes, i); } -EXPORT_SYMBOL_GPL(dax_copy_from_iter); size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) @@ -280,9 +166,15 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, if (!dax_alive(dax_dev)) return 0; - return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i); + /* + * The userspace address for the memory copy has already been validated + * via access_ok() in vfs_red, so use the 'no check' version to bypass + * the HARDENED_USERCOPY overhead. 
+ */ + if (test_bit(DAXDEV_NOMC, &dax_dev->flags)) + return _copy_mc_to_iter(addr, bytes, i); + return _copy_to_iter(addr, bytes, i); } -EXPORT_SYMBOL_GPL(dax_copy_to_iter); int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages) @@ -332,17 +224,29 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev) } EXPORT_SYMBOL_GPL(dax_write_cache_enabled); -bool __dax_synchronous(struct dax_device *dax_dev) +bool dax_synchronous(struct dax_device *dax_dev) { return test_bit(DAXDEV_SYNC, &dax_dev->flags); } -EXPORT_SYMBOL_GPL(__dax_synchronous); +EXPORT_SYMBOL_GPL(dax_synchronous); -void __set_dax_synchronous(struct dax_device *dax_dev) +void set_dax_synchronous(struct dax_device *dax_dev) { set_bit(DAXDEV_SYNC, &dax_dev->flags); } -EXPORT_SYMBOL_GPL(__set_dax_synchronous); +EXPORT_SYMBOL_GPL(set_dax_synchronous); + +void set_dax_nocache(struct dax_device *dax_dev) +{ + set_bit(DAXDEV_NOCACHE, &dax_dev->flags); +} +EXPORT_SYMBOL_GPL(set_dax_nocache); + +void set_dax_nomc(struct dax_device *dax_dev) +{ + set_bit(DAXDEV_NOMC, &dax_dev->flags); +} +EXPORT_SYMBOL_GPL(set_dax_nomc); bool dax_alive(struct dax_device *dax_dev) { @@ -363,12 +267,7 @@ void kill_dax(struct dax_device *dax_dev) return; clear_bit(DAXDEV_ALIVE, &dax_dev->flags); - synchronize_srcu(&dax_srcu); - - spin_lock(&dax_host_lock); - hlist_del_init(&dax_dev->list); - spin_unlock(&dax_host_lock); } EXPORT_SYMBOL_GPL(kill_dax); @@ -400,8 +299,6 @@ static struct dax_device *to_dax_dev(struct inode *inode) static void dax_free_inode(struct inode *inode) { struct dax_device *dax_dev = to_dax_dev(inode); - kfree(dax_dev->host); - dax_dev->host = NULL; if (inode->i_rdev) ida_simple_remove(&dax_minor_ida, iminor(inode)); kmem_cache_free(dax_cache, dax_dev); @@ -476,65 +373,30 @@ static struct dax_device *dax_dev_get(dev_t devt) return dax_dev; } -static void dax_add_host(struct dax_device *dax_dev, const char *host) -{ - int hash; - - /* - * Unconditionally init dax_dev since it's coming from a - * non-zeroed slab cache - */ - INIT_HLIST_NODE(&dax_dev->list); - dax_dev->host = host; - if (!host) - return; - - hash = dax_host_hash(host); - spin_lock(&dax_host_lock); - hlist_add_head(&dax_dev->list, &dax_host_list[hash]); - spin_unlock(&dax_host_lock); -} - -struct dax_device *alloc_dax(void *private, const char *__host, - const struct dax_operations *ops, unsigned long flags) +struct dax_device *alloc_dax(void *private, const struct dax_operations *ops) { struct dax_device *dax_dev; - const char *host; dev_t devt; int minor; - if (ops && !ops->zero_page_range) { - pr_debug("%s: error: device does not provide dax" - " operation zero_page_range()\n", - __host ? 
__host : "Unknown"); + if (WARN_ON_ONCE(ops && !ops->zero_page_range)) return ERR_PTR(-EINVAL); - } - - host = kstrdup(__host, GFP_KERNEL); - if (__host && !host) - return ERR_PTR(-ENOMEM); minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL); if (minor < 0) - goto err_minor; + return ERR_PTR(-ENOMEM); devt = MKDEV(MAJOR(dax_devt), minor); dax_dev = dax_dev_get(devt); if (!dax_dev) goto err_dev; - dax_add_host(dax_dev, host); dax_dev->ops = ops; dax_dev->private = private; - if (flags & DAXDEV_F_SYNC) - set_dax_synchronous(dax_dev); - return dax_dev; err_dev: ida_simple_remove(&dax_minor_ida, minor); - err_minor: - kfree(host); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(alloc_dax); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 66ba16713f69..1b97a11d7151 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -162,71 +162,34 @@ static int linear_iterate_devices(struct dm_target *ti, return fn(ti, lc->dev, lc->start, ti->len, data); } -#if IS_ENABLED(CONFIG_DAX_DRIVER) -static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) +#if IS_ENABLED(CONFIG_FS_DAX) +static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff) { - long ret; struct linear_c *lc = ti->private; - struct block_device *bdev = lc->dev->bdev; - struct dax_device *dax_dev = lc->dev->dax_dev; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - - dev_sector = linear_map_sector(ti, sector); - ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff); - if (ret) - return ret; - return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); -} + sector_t sector = linear_map_sector(ti, *pgoff << PAGE_SECTORS_SHIFT); -static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) -{ - struct linear_c *lc = ti->private; - struct block_device *bdev = lc->dev->bdev; - struct dax_device *dax_dev = lc->dev->dax_dev; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - - dev_sector = linear_map_sector(ti, sector); - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff)) - return 0; - return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); + *pgoff = (get_start_sect(lc->dev->bdev) + sector) >> PAGE_SECTORS_SHIFT; + return lc->dev->dax_dev; } -static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) +static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, + long nr_pages, void **kaddr, pfn_t *pfn) { - struct linear_c *lc = ti->private; - struct block_device *bdev = lc->dev->bdev; - struct dax_device *dax_dev = lc->dev->dax_dev; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - - dev_sector = linear_map_sector(ti, sector); - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff)) - return 0; - return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i); + struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff); + + return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); } static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, size_t nr_pages) { - int ret; - struct linear_c *lc = ti->private; - struct block_device *bdev = lc->dev->bdev; - struct dax_device *dax_dev = lc->dev->dax_dev; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - - dev_sector = linear_map_sector(ti, sector); - ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff); - if (ret) - return ret; + 
struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff); + return dax_zero_page_range(dax_dev, pgoff, nr_pages); } #else #define linear_dax_direct_access NULL -#define linear_dax_copy_from_iter NULL -#define linear_dax_copy_to_iter NULL #define linear_dax_zero_page_range NULL #endif @@ -244,8 +207,6 @@ static struct target_type linear_target = { .prepare_ioctl = linear_prepare_ioctl, .iterate_devices = linear_iterate_devices, .direct_access = linear_dax_direct_access, - .dax_copy_from_iter = linear_dax_copy_from_iter, - .dax_copy_to_iter = linear_dax_copy_to_iter, .dax_zero_page_range = linear_dax_zero_page_range, }; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 0b3ef977ceeb..139b09b06eda 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -901,120 +901,34 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit limits->io_min = limits->physical_block_size; } -#if IS_ENABLED(CONFIG_DAX_DRIVER) -static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes, - struct iov_iter *i) +#if IS_ENABLED(CONFIG_FS_DAX) +static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti, + pgoff_t *pgoff) { - struct pending_block *block; - - if (!bytes) - return 0; - - block = kzalloc(sizeof(struct pending_block), GFP_KERNEL); - if (!block) { - DMERR("Error allocating dax pending block"); - return -ENOMEM; - } - - block->data = kzalloc(bytes, GFP_KERNEL); - if (!block->data) { - DMERR("Error allocating dax data space"); - kfree(block); - return -ENOMEM; - } - - /* write data provided via the iterator */ - if (!copy_from_iter(block->data, bytes, i)) { - DMERR("Error copying dax data"); - kfree(block->data); - kfree(block); - return -EIO; - } - - /* rewind the iterator so that the block driver can use it */ - iov_iter_revert(i, bytes); - - block->datalen = bytes; - block->sector = bio_to_dev_sectors(lc, sector); - block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift; - - atomic_inc(&lc->pending_blocks); - spin_lock_irq(&lc->blocks_lock); - list_add_tail(&block->list, &lc->unflushed_blocks); - spin_unlock_irq(&lc->blocks_lock); - wake_up_process(lc->log_kthread); + struct log_writes_c *lc = ti->private; - return 0; + *pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT); + return lc->dev->dax_dev; } static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { - struct log_writes_c *lc = ti->private; - sector_t sector = pgoff * PAGE_SECTORS; - int ret; - - ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff); - if (ret) - return ret; - return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn); -} - -static size_t log_writes_dax_copy_from_iter(struct dm_target *ti, - pgoff_t pgoff, void *addr, size_t bytes, - struct iov_iter *i) -{ - struct log_writes_c *lc = ti->private; - sector_t sector = pgoff * PAGE_SECTORS; - int err; - - if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff)) - return 0; - - /* Don't bother doing anything if logging has been disabled */ - if (!lc->logging_enabled) - goto dax_copy; - - err = log_dax(lc, sector, bytes, i); - if (err) { - DMWARN("Error %d logging DAX write", err); - return 0; - } -dax_copy: - return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i); -} - -static size_t log_writes_dax_copy_to_iter(struct dm_target *ti, - pgoff_t pgoff, void *addr, size_t bytes, - struct iov_iter *i) -{ - struct log_writes_c *lc = ti->private; - 
sector_t sector = pgoff * PAGE_SECTORS; + struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff); - if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff)) - return 0; - return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i); + return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); } static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, size_t nr_pages) { - int ret; - struct log_writes_c *lc = ti->private; - sector_t sector = pgoff * PAGE_SECTORS; - - ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT, - &pgoff); - if (ret) - return ret; - return dax_zero_page_range(lc->dev->dax_dev, pgoff, - nr_pages << PAGE_SHIFT); + struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff); + + return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT); } #else #define log_writes_dax_direct_access NULL -#define log_writes_dax_copy_from_iter NULL -#define log_writes_dax_copy_to_iter NULL #define log_writes_dax_zero_page_range NULL #endif @@ -1032,8 +946,6 @@ static struct target_type log_writes_target = { .iterate_devices = log_writes_iterate_devices, .io_hints = log_writes_io_hints, .direct_access = log_writes_dax_direct_access, - .dax_copy_from_iter = log_writes_dax_copy_from_iter, - .dax_copy_to_iter = log_writes_dax_copy_to_iter, .dax_zero_page_range = log_writes_dax_zero_page_range, }; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 6660b6b53d5b..e566115ec0bb 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -300,91 +300,40 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } -#if IS_ENABLED(CONFIG_DAX_DRIVER) -static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) +#if IS_ENABLED(CONFIG_FS_DAX) +static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff) { - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; struct stripe_c *sc = ti->private; - struct dax_device *dax_dev; struct block_device *bdev; + sector_t dev_sector; uint32_t stripe; - long ret; - stripe_map_sector(sc, sector, &stripe, &dev_sector); + stripe_map_sector(sc, *pgoff * PAGE_SECTORS, &stripe, &dev_sector); dev_sector += sc->stripe[stripe].physical_start; - dax_dev = sc->stripe[stripe].dev->dax_dev; bdev = sc->stripe[stripe].dev->bdev; - ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff); - if (ret) - return ret; - return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); + *pgoff = (get_start_sect(bdev) + dev_sector) >> PAGE_SECTORS_SHIFT; + return sc->stripe[stripe].dev->dax_dev; } -static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) +static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, + long nr_pages, void **kaddr, pfn_t *pfn) { - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - struct stripe_c *sc = ti->private; - struct dax_device *dax_dev; - struct block_device *bdev; - uint32_t stripe; - - stripe_map_sector(sc, sector, &stripe, &dev_sector); - dev_sector += sc->stripe[stripe].physical_start; - dax_dev = sc->stripe[stripe].dev->dax_dev; - bdev = sc->stripe[stripe].dev->bdev; + struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff); - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff)) - return 0; - return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); -} - -static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t 
pgoff, - void *addr, size_t bytes, struct iov_iter *i) -{ - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - struct stripe_c *sc = ti->private; - struct dax_device *dax_dev; - struct block_device *bdev; - uint32_t stripe; - - stripe_map_sector(sc, sector, &stripe, &dev_sector); - dev_sector += sc->stripe[stripe].physical_start; - dax_dev = sc->stripe[stripe].dev->dax_dev; - bdev = sc->stripe[stripe].dev->bdev; - - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff)) - return 0; - return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i); + return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); } static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, size_t nr_pages) { - int ret; - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; - struct stripe_c *sc = ti->private; - struct dax_device *dax_dev; - struct block_device *bdev; - uint32_t stripe; - - stripe_map_sector(sc, sector, &stripe, &dev_sector); - dev_sector += sc->stripe[stripe].physical_start; - dax_dev = sc->stripe[stripe].dev->dax_dev; - bdev = sc->stripe[stripe].dev->bdev; + struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff); - ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff); - if (ret) - return ret; return dax_zero_page_range(dax_dev, pgoff, nr_pages); } #else #define stripe_dax_direct_access NULL -#define stripe_dax_copy_from_iter NULL -#define stripe_dax_copy_to_iter NULL #define stripe_dax_zero_page_range NULL #endif @@ -521,8 +470,6 @@ static struct target_type stripe_target = { .iterate_devices = stripe_iterate_devices, .io_hints = stripe_io_hints, .direct_access = stripe_dax_direct_access, - .dax_copy_from_iter = stripe_dax_copy_from_iter, - .dax_copy_to_iter = stripe_dax_copy_to_iter, .dax_zero_page_range = stripe_dax_zero_page_range, }; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index aa173f5bdc3d..e43096cfe9e2 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -806,12 +806,14 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) EXPORT_SYMBOL_GPL(dm_table_set_type); /* validate the dax capability of the target device span */ -int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, +static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - int blocksize = *(int *) data; + if (dev->dax_dev) + return false; - return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len); + DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev); + return true; } /* Check devices support synchronous DAX */ @@ -821,8 +823,8 @@ static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_de return !dev->dax_dev || !dax_synchronous(dev->dax_dev); } -bool dm_table_supports_dax(struct dm_table *t, - iterate_devices_callout_fn iterate_fn, int *blocksize) +static bool dm_table_supports_dax(struct dm_table *t, + iterate_devices_callout_fn iterate_fn) { struct dm_target *ti; unsigned i; @@ -835,7 +837,7 @@ bool dm_table_supports_dax(struct dm_table *t, return false; if (!ti->type->iterate_devices || - ti->type->iterate_devices(ti, iterate_fn, blocksize)) + ti->type->iterate_devices(ti, iterate_fn, NULL)) return false; } @@ -862,7 +864,6 @@ static int dm_table_determine_type(struct dm_table *t) struct dm_target *tgt; struct list_head *devices = dm_table_get_devices(t); enum dm_queue_mode live_md_type = dm_get_md_type(t->md); - int page_size = PAGE_SIZE; if (t->type != DM_TYPE_NONE) { /* target already set the 
table's type */ @@ -906,7 +907,7 @@ static int dm_table_determine_type(struct dm_table *t) verify_bio_based: /* We must use this table as bio-based */ t->type = DM_TYPE_BIO_BASED; - if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) || + if (dm_table_supports_dax(t, device_not_dax_capable) || (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) { t->type = DM_TYPE_DAX_BIO_BASED; } @@ -1976,7 +1977,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { bool wc = false, fua = false; - int page_size = PAGE_SIZE; int r; /* @@ -2010,9 +2010,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_write_cache(q, wc, fua); - if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) { + if (dm_table_supports_dax(t, device_not_dax_capable)) { blk_queue_flag_set(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL)) + if (dm_table_supports_dax(t, device_not_dax_synchronous_capable)) set_dax_synchronous(t->md->dax_dev); } else diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 4b8991cde223..4f31591d2d25 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -38,7 +38,7 @@ #define BITMAP_GRANULARITY PAGE_SIZE #endif -#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER) +#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_FS_DAX) #define DM_WRITECACHE_HAS_PMEM #endif diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 280918cdcabd..c0ae8087c602 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -637,7 +637,7 @@ static int open_table_device(struct table_device *td, dev_t dev, struct mapped_device *md) { struct block_device *bdev; - + u64 part_off; int r; BUG_ON(td->dm_dev.bdev); @@ -653,7 +653,7 @@ static int open_table_device(struct table_device *td, dev_t dev, } td->dm_dev.bdev = bdev; - td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev); + td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off); return 0; } @@ -1027,74 +1027,6 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, return ret; } -static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len) -{ - struct mapped_device *md = dax_get_private(dax_dev); - struct dm_table *map; - bool ret = false; - int srcu_idx; - - map = dm_get_live_table(md, &srcu_idx); - if (!map) - goto out; - - ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize); - -out: - dm_put_live_table(md, srcu_idx); - - return ret; -} - -static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) -{ - struct mapped_device *md = dax_get_private(dax_dev); - sector_t sector = pgoff * PAGE_SECTORS; - struct dm_target *ti; - long ret = 0; - int srcu_idx; - - ti = dm_dax_get_live_target(md, sector, &srcu_idx); - - if (!ti) - goto out; - if (!ti->type->dax_copy_from_iter) { - ret = copy_from_iter(addr, bytes, i); - goto out; - } - ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); - out: - dm_put_live_table(md, srcu_idx); - - return ret; -} - -static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) -{ - struct mapped_device *md = dax_get_private(dax_dev); - sector_t sector = pgoff * PAGE_SECTORS; - struct dm_target *ti; - long ret = 0; - int srcu_idx; - - ti = dm_dax_get_live_target(md, sector, 
&srcu_idx); - - if (!ti) - goto out; - if (!ti->type->dax_copy_to_iter) { - ret = copy_to_iter(addr, bytes, i); - goto out; - } - ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i); - out: - dm_put_live_table(md, srcu_idx); - - return ret; -} - static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages) { @@ -1683,6 +1615,7 @@ static void cleanup_mapped_device(struct mapped_device *md) bioset_exit(&md->io_bs); if (md->dax_dev) { + dax_remove_host(md->disk); kill_dax(md->dax_dev); put_dax(md->dax_dev); md->dax_dev = NULL; @@ -1784,10 +1717,15 @@ static struct mapped_device *alloc_dev(int minor) md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); - if (IS_ENABLED(CONFIG_DAX_DRIVER)) { - md->dax_dev = alloc_dax(md, md->disk->disk_name, - &dm_dax_ops, 0); - if (IS_ERR(md->dax_dev)) + if (IS_ENABLED(CONFIG_FS_DAX)) { + md->dax_dev = alloc_dax(md, &dm_dax_ops); + if (IS_ERR(md->dax_dev)) { + md->dax_dev = NULL; + goto bad; + } + set_dax_nocache(md->dax_dev); + set_dax_nomc(md->dax_dev); + if (dax_add_host(md->dax_dev, md->disk)) goto bad; } @@ -3041,9 +2979,6 @@ static const struct block_device_operations dm_rq_blk_dops = { static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, - .dax_supported = dm_dax_supported, - .copy_from_iter = dm_dax_copy_from_iter, - .copy_to_iter = dm_dax_copy_to_iter, .zero_page_range = dm_dax_zero_page_range, }; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 742d9c80efe1..9013dc1a7b00 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -73,10 +73,6 @@ bool dm_table_bio_based(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); -bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn, - int *blocksize); -int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data); void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index b7d1eb38b27d..347fe7afa583 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -22,7 +22,7 @@ if LIBNVDIMM config BLK_DEV_PMEM tristate "PMEM: Persistent memory block device support" default LIBNVDIMM - select DAX_DRIVER + select DAX select ND_BTT if BTT select ND_PFN if NVDIMM_PFN help diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index fe7ece1534e1..58d95242a836 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -301,29 +301,8 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev, return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn); } -/* - * Use the 'no check' versions of copy_from_iter_flushcache() and - * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. 
Bounds - * checking, both file offset and device offset, is handled by - * dax_iomap_actor() - */ -static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) -{ - return _copy_from_iter_flushcache(addr, bytes, i); -} - -static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, - void *addr, size_t bytes, struct iov_iter *i) -{ - return _copy_mc_to_iter(addr, bytes, i); -} - static const struct dax_operations pmem_dax_ops = { .direct_access = pmem_dax_direct_access, - .dax_supported = generic_fsdax_supported, - .copy_from_iter = pmem_copy_from_iter, - .copy_to_iter = pmem_copy_to_iter, .zero_page_range = pmem_dax_zero_page_range, }; @@ -379,6 +358,7 @@ static void pmem_release_disk(void *__pmem) { struct pmem_device *pmem = __pmem; + dax_remove_host(pmem->disk); kill_dax(pmem->dax_dev); put_dax(pmem->dax_dev); del_gendisk(pmem->disk); @@ -402,7 +382,6 @@ static int pmem_attach_disk(struct device *dev, struct gendisk *disk; void *addr; int rc; - unsigned long flags = 0UL; pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL); if (!pmem) @@ -495,19 +474,24 @@ static int pmem_attach_disk(struct device *dev, nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range); disk->bb = &pmem->bb; - if (is_nvdimm_sync(nd_region)) - flags = DAXDEV_F_SYNC; - dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags); + dax_dev = alloc_dax(pmem, &pmem_dax_ops); if (IS_ERR(dax_dev)) { rc = PTR_ERR(dax_dev); goto out; } + set_dax_nocache(dax_dev); + set_dax_nomc(dax_dev); + if (is_nvdimm_sync(nd_region)) + set_dax_synchronous(dax_dev); + rc = dax_add_host(dax_dev, disk); + if (rc) + goto out_cleanup_dax; dax_write_cache(dax_dev, nvdimm_has_cache(nd_region)); pmem->dax_dev = dax_dev; rc = device_add_disk(dev, disk, pmem_attribute_groups); if (rc) - goto out_cleanup_dax; + goto out_remove_host; if (devm_add_action_or_reset(dev, pmem_release_disk, pmem)) return -ENOMEM; @@ -519,6 +503,8 @@ static int pmem_attach_disk(struct device *dev, dev_warn(dev, "'badblocks' notification disabled\n"); return 0; +out_remove_host: + dax_remove_host(pmem->disk); out_cleanup_dax: kill_dax(pmem->dax_dev); put_dax(pmem->dax_dev); diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 8d47cb7218d1..454d5f6f16ff 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -219,7 +219,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr, pci_bus_address(pdev, bar) + offset, range_len(&pgmap->range), dev_to_node(&pdev->dev), - pgmap->ref); + &pgmap->ref); if (error) goto pages_free; diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index d0416dbd0cd8..e3710a762aba 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -5,7 +5,7 @@ comment "S/390 block device drivers" config DCSSBLK def_tristate m select FS_DAX_LIMITED - select DAX_DRIVER + select DAX prompt "DCSSBLK support" depends on S390 && BLOCK help diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 27ab888b44d0..d614843caf6c 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -44,18 +44,6 @@ static const struct block_device_operations dcssblk_devops = { .release = dcssblk_release, }; -static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev, - pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) -{ - return copy_from_iter(addr, bytes, i); -} - -static size_t 
dcssblk_dax_copy_to_iter(struct dax_device *dax_dev, - pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) -{ - return copy_to_iter(addr, bytes, i); -} - static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages) { @@ -72,9 +60,6 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev, static const struct dax_operations dcssblk_dax_ops = { .direct_access = dcssblk_dax_direct_access, - .dax_supported = generic_fsdax_supported, - .copy_from_iter = dcssblk_dax_copy_from_iter, - .copy_to_iter = dcssblk_dax_copy_to_iter, .zero_page_range = dcssblk_dax_zero_page_range, }; @@ -687,18 +672,21 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto put_dev; - dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name, - &dcssblk_dax_ops, DAXDEV_F_SYNC); + dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); if (IS_ERR(dev_info->dax_dev)) { rc = PTR_ERR(dev_info->dax_dev); dev_info->dax_dev = NULL; goto put_dev; } + set_dax_synchronous(dev_info->dax_dev); + rc = dax_add_host(dev_info->dax_dev, dev_info->gd); + if (rc) + goto out_dax; get_device(&dev_info->dev); rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL); if (rc) - goto out_dax; + goto out_dax_host; switch (dev_info->segment_type) { case SEG_TYPE_SR: @@ -714,6 +702,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char rc = count; goto out; +out_dax_host: + dax_remove_host(dev_info->gd); out_dax: put_device(&dev_info->dev); kill_dax(dev_info->dax_dev); |