author:     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 03:49:20 +0200
committer:  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 03:49:20 +0200
commit:     53ef7d0e208fa38c3f63d287e0c3ab174f1e1235 (patch)
tree:       7d437edf73ef6deb0d77ce291aa25f041837d056 /fs
parent:     Merge tag 'staging-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent:     Merge branch 'for-4.12/dax' into libnvdimm-for-next (diff)
download:   linux-53ef7d0e208fa38c3f63d287e0c3ab174f1e1235.tar.xz, linux-53ef7d0e208fa38c3f63d287e0c3ab174f1e1235.zip
Merge tag 'libnvdimm-for-4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams:
"The bulk of this has been in multiple -next releases. There were a few
late breaking fixes and small features that got added in the last
couple days, but the whole set has received a build success
notification from the kbuild robot.
Change summary:
- Region media error reporting: A libnvdimm region device is the
parent to one or more namespaces. To date, media errors have been
reported via the "badblocks" attribute attached to pmem block
devices for namespaces in "raw" or "memory" mode. Given that
namespaces can be in "device-dax" or "btt-sector" mode this new
interface reports media errors generically, i.e. independent of
namespace modes or state.
This subsequently allows userspace tooling to craft "ACPI 6.1
Section 9.20.7.6 Function Index 4 - Clear Uncorrectable Error"
requests and submit them via the ioctl path for NVDIMM root bus
devices.
- Introduce 'struct dax_device' and 'struct dax_operations': Prompted
by a request from Linus and feedback from Christoph this allows for
dax capable drivers to publish their own custom dax operations.
This fixes the broken assumption that all dax operations are
related to a persistent memory device, and makes it easier for
other architectures and platforms to add customized persistent
memory support.
- 'libnvdimm' core updates: A new "deep_flush" sysfs attribute is
available for storage appliance applications to manually trigger
memory controllers to drain write-pending buffers that would
otherwise be flushed automatically by the platform ADR
(asynchronous-DRAM-refresh) mechanism at a power loss event.
Support for "locked" DIMMs is included to prevent namespaces from
surfacing when the namespace label data area is locked. Finally,
fixes for various reported deadlocks and crashes, also tagged for
-stable.
- ACPI / nfit driver updates: General updates of the nfit driver to
add DSM command overrides, ACPI 6.1 health state flags support, DSM
payload debug available by default, and various fixes.
Acknowledgements that came after the branch was pushed:
- commit 565851c972b5 "device-dax: fix sysfs attribute deadlock":
Tested-by: Yi Zhang <yizhan@redhat.com>
- commit 23f498448362 "libnvdimm: rework region badblocks clearing"
Tested-by: Toshi Kani <toshi.kani@hpe.com>"
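
Two of the items in the change summary above surface directly to userspace through sysfs: the per-region "badblocks" attribute and the new "deep_flush" trigger. The snippet below is an illustrative sketch, not part of the pull request; the attribute paths under /sys/bus/nd/devices/regionN/ are assumptions based on the description above.

/*
 * Hypothetical userspace sketch: read a libnvdimm region's badblocks
 * list and request a deep flush.  Paths are assumed, not taken from
 * the pull request text.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *region = argc > 1 ? argv[1] : "region0";
	char path[256];
	char line[128];
	FILE *f;

	/* media errors, reported per region regardless of namespace mode */
	snprintf(path, sizeof(path), "/sys/bus/nd/devices/%s/badblocks", region);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			printf("%s: bad range (assumed 512B-sector offset/length): %s",
					region, line);
		fclose(f);
	}

	/* ask the memory controller to drain write-pending buffers */
	snprintf(path, sizeof(path), "/sys/bus/nd/devices/%s/deep_flush", region);
	f = fopen(path, "w");
	if (f) {
		fputs("1", f);
		fclose(f);
	}
	return 0;
}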
* tag 'libnvdimm-for-4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (52 commits)
libnvdimm, pfn: fix 'npfns' vs section alignment
libnvdimm: handle locked label storage areas
libnvdimm: convert NDD_ flags to use bitops, introduce NDD_LOCKED
brd: fix uninitialized use of brd->dax_dev
block, dax: use correct format string in bdev_dax_supported
device-dax: fix sysfs attribute deadlock
libnvdimm: restore "libnvdimm: band aid btt vs clear poison locking"
libnvdimm: fix nvdimm_bus_lock() vs device_lock() ordering
libnvdimm: rework region badblocks clearing
acpi, nfit: kill ACPI_NFIT_DEBUG
libnvdimm: fix clear length of nvdimm_forget_poison()
libnvdimm, pmem: fix a NULL pointer BUG in nd_pmem_notify
libnvdimm, region: sysfs trigger for nvdimm_flush()
libnvdimm: fix phys_addr for nvdimm_clear_poison
x86, dax, pmem: remove indirection around memcpy_from_pmem()
block: remove block_device_operations ->direct_access()
block, dax: convert bdev_dax_supported() to dax_direct_access()
filesystem-dax: convert to dax_direct_access()
Revert "block: use DAX for partition table reads"
ext2, ext4, xfs: retrieve dax_device for iomap operations
...
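
Every fs/ hunk below follows the same conversion: drop struct blk_dax_ctl and bdev_direct_access(), look up the disk's dax_device, and call dax_direct_access() under dax_read_lock(). A minimal sketch of that pattern, condensed from the hunks that follow (illustrative only, not a function from this series):

#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>

/* Sketch of the dax_direct_access() access pattern used throughout the diff. */
static int dax_example_access(struct block_device *bdev, sector_t sector,
		size_t size)
{
	struct dax_device *dax_dev;
	pgoff_t pgoff;
	void *kaddr;
	pfn_t pfn;
	long len;
	int id, rc;

	/* translate a device sector to a page offset; sector/size must be page aligned */
	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	/* look up the dax_device registered for this disk (takes a reference) */
	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EOPNOTSUPP;

	/* dax_read_lock() keeps the dax_device live across the access */
	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (len > 0) {
		/* kaddr and pfn are now valid for PFN_PHYS(len) bytes */
	}
	dax_read_unlock(id);

	put_dax(dax_dev);
	return len < 0 ? len : 0;
}

Note that bdev_dax_pgoff() rejects any sector/size pair that is not page aligned, which is exactly what the rewritten bdev_dax_supported() below relies on.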
Diffstat (limited to 'fs')

-rw-r--r--  fs/block_dev.c      | 117
-rw-r--r--  fs/dax.c            | 297
-rw-r--r--  fs/ext2/inode.c     |   9
-rw-r--r--  fs/ext4/inode.c     |   9
-rw-r--r--  fs/iomap.c          |   3
-rw-r--r--  fs/xfs/xfs_iomap.c  |  10

6 files changed, 219 insertions, 226 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0d435c794d76..2a305c1a2d88 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/blkpg.h>
 #include <linux/magic.h>
+#include <linux/dax.h>
 #include <linux/buffer_head.h>
 #include <linux/swap.h>
 #include <linux/pagevec.h>
@@ -716,50 +717,18 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(bdev_write_page);
 
-/**
- * bdev_direct_access() - Get the address for directly-accessibly memory
- * @bdev: The device containing the memory
- * @dax: control and output parameters for ->direct_access
- *
- * If a block device is made up of directly addressable memory, this function
- * will tell the caller the PFN and the address of the memory. The address
- * may be directly dereferenced within the kernel without the need to call
- * ioremap(), kmap() or similar. The PFN is suitable for inserting into
- * page tables.
- *
- * Return: negative errno if an error occurs, otherwise the number of bytes
- * accessible at this address.
- */
-long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
+int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
+		pgoff_t *pgoff)
 {
-	sector_t sector = dax->sector;
-	long avail, size = dax->size;
-	const struct block_device_operations *ops = bdev->bd_disk->fops;
+	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
 
-	/*
-	 * The device driver is allowed to sleep, in order to make the
-	 * memory directly accessible.
-	 */
-	might_sleep();
-
-	if (size < 0)
-		return size;
-	if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
-		return -EOPNOTSUPP;
-	if ((sector + DIV_ROUND_UP(size, 512)) >
-			part_nr_sects_read(bdev->bd_part))
-		return -ERANGE;
-	sector += get_start_sect(bdev);
-	if (sector % (PAGE_SIZE / 512))
+	if (pgoff)
+		*pgoff = PHYS_PFN(phys_off);
+	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
 		return -EINVAL;
-	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
-	if (!avail)
-		return -ERANGE;
-	if (avail > 0 && avail & ~PAGE_MASK)
-		return -ENXIO;
-	return min(avail, size);
+	return 0;
 }
-EXPORT_SYMBOL_GPL(bdev_direct_access);
+EXPORT_SYMBOL(bdev_dax_pgoff);
 
 /**
  * bdev_dax_supported() - Check if the device supports dax for filesystem
@@ -773,62 +742,46 @@ EXPORT_SYMBOL_GPL(bdev_direct_access);
  */
 int bdev_dax_supported(struct super_block *sb, int blocksize)
 {
-	struct blk_dax_ctl dax = {
-		.sector = 0,
-		.size = PAGE_SIZE,
-	};
-	int err;
+	struct block_device *bdev = sb->s_bdev;
+	struct dax_device *dax_dev;
+	pgoff_t pgoff;
+	int err, id;
+	void *kaddr;
+	pfn_t pfn;
+	long len;
 
 	if (blocksize != PAGE_SIZE) {
 		vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
 		return -EINVAL;
 	}
 
-	err = bdev_direct_access(sb->s_bdev, &dax);
-	if (err < 0) {
-		switch (err) {
-		case -EOPNOTSUPP:
-			vfs_msg(sb, KERN_ERR,
-				"error: device does not support dax");
-			break;
-		case -EINVAL:
-			vfs_msg(sb, KERN_ERR,
-				"error: unaligned partition for dax");
-			break;
-		default:
-			vfs_msg(sb, KERN_ERR,
-				"error: dax access failed (%d)", err);
-		}
+	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
+	if (err) {
+		vfs_msg(sb, KERN_ERR, "error: unaligned partition for dax");
 		return err;
 	}
 
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bdev_dax_supported);
-
-/**
- * bdev_dax_capable() - Return if the raw device is capable for dax
- * @bdev: The device for raw block device access
- */
-bool bdev_dax_capable(struct block_device *bdev)
-{
-	struct blk_dax_ctl dax = {
-		.size = PAGE_SIZE,
-	};
+	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	if (!dax_dev) {
+		vfs_msg(sb, KERN_ERR, "error: device does not support dax");
+		return -EOPNOTSUPP;
+	}
 
-	if (!IS_ENABLED(CONFIG_FS_DAX))
-		return false;
+	id = dax_read_lock();
+	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+	dax_read_unlock(id);
 
-	dax.sector = 0;
-	if (bdev_direct_access(bdev, &dax) < 0)
-		return false;
+	put_dax(dax_dev);
 
-	dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
-	if (bdev_direct_access(bdev, &dax) < 0)
-		return false;
+	if (len < 1) {
+		vfs_msg(sb, KERN_ERR,
+				"error: dax access failed (%ld)", len);
+		return len < 0 ? len : -EIO;
+	}
 
-	return true;
+	return 0;
 }
+EXPORT_SYMBOL_GPL(bdev_dax_supported);
 
 /*
  * pseudo-fs
diff --git a/fs/dax.c b/fs/dax.c
@@ -55,32 +55,6 @@ static int __init init_dax_wait_table(void)
 }
 fs_initcall(init_dax_wait_table);
 
-static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
-{
-	struct request_queue *q = bdev->bd_queue;
-	long rc = -EIO;
-
-	dax->addr = ERR_PTR(-EIO);
-	if (blk_queue_enter(q, true) != 0)
-		return rc;
-
-	rc = bdev_direct_access(bdev, dax);
-	if (rc < 0) {
-		dax->addr = ERR_PTR(rc);
-		blk_queue_exit(q);
-		return rc;
-	}
-	return rc;
-}
-
-static void dax_unmap_atomic(struct block_device *bdev,
-		const struct blk_dax_ctl *dax)
-{
-	if (IS_ERR(dax->addr))
-		return;
-	blk_queue_exit(bdev->bd_queue);
-}
-
 static int dax_is_pmd_entry(void *entry)
 {
 	return (unsigned long)entry & RADIX_DAX_PMD;
@@ -101,26 +75,6 @@ static int dax_is_empty_entry(void *entry)
 	return (unsigned long)entry & RADIX_DAX_EMPTY;
 }
 
-struct page *read_dax_sector(struct block_device *bdev, sector_t n)
-{
-	struct page *page = alloc_pages(GFP_KERNEL, 0);
-	struct blk_dax_ctl dax = {
-		.size = PAGE_SIZE,
-		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
-	};
-	long rc;
-
-	if (!page)
-		return ERR_PTR(-ENOMEM);
-
-	rc = dax_map_atomic(bdev, &dax);
-	if (rc < 0)
-		return ERR_PTR(rc);
-	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
-	dax_unmap_atomic(bdev, &dax);
-	return page;
-}
-
 /*
  * DAX radix tree locking
  */
@@ -582,21 +536,30 @@ static int dax_load_hole(struct address_space *mapping, void **entry,
 	return ret;
 }
 
-static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
-		struct page *to, unsigned long vaddr)
+static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
+		sector_t sector, size_t size, struct page *to,
+		unsigned long vaddr)
 {
-	struct blk_dax_ctl dax = {
-		.sector = sector,
-		.size = size,
-	};
-	void *vto;
-
-	if (dax_map_atomic(bdev, &dax) < 0)
-		return PTR_ERR(dax.addr);
+	void *vto, *kaddr;
+	pgoff_t pgoff;
+	pfn_t pfn;
+	long rc;
+	int id;
+
+	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+	if (rc)
+		return rc;
+
+	id = dax_read_lock();
+	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+	if (rc < 0) {
+		dax_read_unlock(id);
+		return rc;
+	}
 	vto = kmap_atomic(to);
-	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
+	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
 	kunmap_atomic(vto);
-	dax_unmap_atomic(bdev, &dax);
+	dax_read_unlock(id);
 	return 0;
 }
 
@@ -764,12 +727,16 @@ unlock_pte:
 }
 
 static int dax_writeback_one(struct block_device *bdev,
-		struct address_space *mapping, pgoff_t index, void *entry)
+		struct dax_device *dax_dev, struct address_space *mapping,
+		pgoff_t index, void *entry)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
-	struct blk_dax_ctl dax;
-	void *entry2, **slot;
-	int ret = 0;
+	void *entry2, **slot, *kaddr;
+	long ret = 0, id;
+	sector_t sector;
+	pgoff_t pgoff;
+	size_t size;
+	pfn_t pfn;
 
 	/*
 	 * A page got tagged dirty in DAX mapping? Something is seriously
@@ -818,26 +785,29 @@ static int dax_writeback_one(struct block_device *bdev,
 	 * 'entry'. This allows us to flush for PMD_SIZE and not have to
 	 * worry about partial PMD writebacks.
	 */
-	dax.sector = dax_radix_sector(entry);
-	dax.size = PAGE_SIZE << dax_radix_order(entry);
+	sector = dax_radix_sector(entry);
+	size = PAGE_SIZE << dax_radix_order(entry);
+
+	id = dax_read_lock();
+	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+	if (ret)
+		goto dax_unlock;
 
 	/*
-	 * We cannot hold tree_lock while calling dax_map_atomic() because it
-	 * eventually calls cond_resched().
+	 * dax_direct_access() may sleep, so cannot hold tree_lock over
+	 * its invocation.
	 */
-	ret = dax_map_atomic(bdev, &dax);
-	if (ret < 0) {
-		put_locked_mapping_entry(mapping, index, entry);
-		return ret;
-	}
+	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
+	if (ret < 0)
+		goto dax_unlock;
 
-	if (WARN_ON_ONCE(ret < dax.size)) {
+	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
 		ret = -EIO;
-		goto unmap;
+		goto dax_unlock;
 	}
 
-	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
-	wb_cache_pmem(dax.addr, dax.size);
+	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
+	wb_cache_pmem(kaddr, size);
 	/*
 	 * After we have flushed the cache, we can clear the dirty tag. There
 	 * cannot be new dirty data in the pfn after the flush has completed as
@@ -847,8 +817,8 @@ static int dax_writeback_one(struct block_device *bdev,
 	spin_lock_irq(&mapping->tree_lock);
 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
 	spin_unlock_irq(&mapping->tree_lock);
- unmap:
-	dax_unmap_atomic(bdev, &dax);
+ dax_unlock:
+	dax_read_unlock(id);
 	put_locked_mapping_entry(mapping, index, entry);
 	return ret;
 
@@ -869,6 +839,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	pgoff_t start_index, end_index;
 	pgoff_t indices[PAGEVEC_SIZE];
+	struct dax_device *dax_dev;
 	struct pagevec pvec;
 	bool done = false;
 	int i, ret = 0;
@@ -879,6 +850,10 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 		return 0;
 
+	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	if (!dax_dev)
+		return -EIO;
+
 	start_index = wbc->range_start >> PAGE_SHIFT;
 	end_index = wbc->range_end >> PAGE_SHIFT;
 
@@ -899,38 +874,49 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 				break;
 			}
 
-			ret = dax_writeback_one(bdev, mapping, indices[i],
-					pvec.pages[i]);
-			if (ret < 0)
+			ret = dax_writeback_one(bdev, dax_dev, mapping,
+					indices[i], pvec.pages[i]);
+			if (ret < 0) {
+				put_dax(dax_dev);
 				return ret;
+			}
 		}
 	}
+	put_dax(dax_dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
 static int dax_insert_mapping(struct address_space *mapping,
-		struct block_device *bdev, sector_t sector, size_t size,
-		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
+		struct block_device *bdev, struct dax_device *dax_dev,
+		sector_t sector, size_t size, void **entryp,
+		struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	unsigned long vaddr = vmf->address;
-	struct blk_dax_ctl dax = {
-		.sector = sector,
-		.size = size,
-	};
-	void *ret;
 	void *entry = *entryp;
+	void *ret, *kaddr;
+	pgoff_t pgoff;
+	int id, rc;
+	pfn_t pfn;
 
-	if (dax_map_atomic(bdev, &dax) < 0)
-		return PTR_ERR(dax.addr);
-	dax_unmap_atomic(bdev, &dax);
+	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+	if (rc)
+		return rc;
 
-	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
+	id = dax_read_lock();
+	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+	if (rc < 0) {
+		dax_read_unlock(id);
+		return rc;
+	}
+	dax_read_unlock(id);
+
+	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
 	if (IS_ERR(ret))
 		return PTR_ERR(ret);
 	*entryp = ret;
 
-	return vm_insert_mixed(vma, vaddr, dax.pfn);
+	return vm_insert_mixed(vma, vaddr, pfn);
 }
 
 /**
@@ -979,24 +965,34 @@ static bool dax_range_is_aligned(struct block_device *bdev,
 	return true;
 }
 
-int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
-		unsigned int offset, unsigned int length)
+int __dax_zero_page_range(struct block_device *bdev,
+		struct dax_device *dax_dev, sector_t sector,
+		unsigned int offset, unsigned int size)
 {
-	struct blk_dax_ctl dax = {
-		.sector = sector,
-		.size = PAGE_SIZE,
-	};
-
-	if (dax_range_is_aligned(bdev, offset, length)) {
-		sector_t start_sector = dax.sector + (offset >> 9);
+	if (dax_range_is_aligned(bdev, offset, size)) {
+		sector_t start_sector = sector + (offset >> 9);
 
 		return blkdev_issue_zeroout(bdev, start_sector,
-				length >> 9, GFP_NOFS, 0);
+				size >> 9, GFP_NOFS, 0);
 	} else {
-		if (dax_map_atomic(bdev, &dax) < 0)
-			return PTR_ERR(dax.addr);
-		clear_pmem(dax.addr + offset, length);
-		dax_unmap_atomic(bdev, &dax);
+		pgoff_t pgoff;
+		long rc, id;
+		void *kaddr;
+		pfn_t pfn;
+
+		rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+		if (rc)
+			return rc;
+
+		id = dax_read_lock();
+		rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
+				&pfn);
+		if (rc < 0) {
+			dax_read_unlock(id);
+			return rc;
+		}
+		clear_pmem(kaddr + offset, size);
+		dax_read_unlock(id);
 	}
 	return 0;
 }
@@ -1011,9 +1007,12 @@ static loff_t
 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct iomap *iomap)
 {
+	struct block_device *bdev = iomap->bdev;
+	struct dax_device *dax_dev = iomap->dax_dev;
 	struct iov_iter *iter = data;
 	loff_t end = pos + length, done = 0;
 	ssize_t ret = 0;
+	int id;
 
 	if (iov_iter_rw(iter) == READ) {
 		end = min(end, i_size_read(inode));
@@ -1038,34 +1037,42 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 					(end - 1) >> PAGE_SHIFT);
 	}
 
+	id = dax_read_lock();
 	while (pos < end) {
 		unsigned offset = pos & (PAGE_SIZE - 1);
-		struct blk_dax_ctl dax = { 0 };
+		const size_t size = ALIGN(length + offset, PAGE_SIZE);
+		const sector_t sector = dax_iomap_sector(iomap, pos);
 		ssize_t map_len;
+		pgoff_t pgoff;
+		void *kaddr;
+		pfn_t pfn;
 
 		if (fatal_signal_pending(current)) {
 			ret = -EINTR;
 			break;
 		}
 
-		dax.sector = dax_iomap_sector(iomap, pos);
-		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
-		map_len = dax_map_atomic(iomap->bdev, &dax);
+		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+		if (ret)
+			break;
+
+		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
 		if (map_len < 0) {
 			ret = map_len;
 			break;
 		}
 
-		dax.addr += offset;
+		map_len = PFN_PHYS(map_len);
+		kaddr += offset;
 		map_len -= offset;
 		if (map_len > end - pos)
 			map_len = end - pos;
 
 		if (iov_iter_rw(iter) == WRITE)
-			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
+			map_len = copy_from_iter_pmem(kaddr, map_len, iter);
 		else
-			map_len = copy_to_iter(dax.addr, map_len, iter);
-		dax_unmap_atomic(iomap->bdev, &dax);
+			map_len = copy_to_iter(kaddr, map_len, iter);
 		if (map_len <= 0) {
 			ret = map_len ? map_len : -EFAULT;
 			break;
@@ -1075,6 +1082,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		length -= map_len;
 		done += map_len;
 	}
+	dax_read_unlock(id);
 
 	return done ? done : ret;
 }
@@ -1181,8 +1189,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 			clear_user_highpage(vmf->cow_page, vaddr);
 			break;
 		case IOMAP_MAPPED:
-			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
-					vmf->cow_page, vaddr);
+			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
+					sector, PAGE_SIZE, vmf->cow_page, vaddr);
 			break;
 		default:
 			WARN_ON_ONCE(1);
@@ -1207,8 +1215,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
 			major = VM_FAULT_MAJOR;
 		}
-		error = dax_insert_mapping(mapping, iomap.bdev, sector,
-				PAGE_SIZE, &entry, vmf->vma, vmf);
+		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
+				sector, PAGE_SIZE, &entry, vmf->vma, vmf);
 		/* -EBUSY is fine, somebody else faulted on the same PTE */
 		if (error == -EBUSY)
 			error = 0;
@@ -1258,41 +1266,48 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
 		loff_t pos, void **entryp)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+	const sector_t sector = dax_iomap_sector(iomap, pos);
+	struct dax_device *dax_dev = iomap->dax_dev;
 	struct block_device *bdev = iomap->bdev;
 	struct inode *inode = mapping->host;
-	struct blk_dax_ctl dax = {
-		.sector = dax_iomap_sector(iomap, pos),
-		.size = PMD_SIZE,
-	};
-	long length = dax_map_atomic(bdev, &dax);
-	void *ret = NULL;
-
-	if (length < 0) /* dax_map_atomic() failed */
+	const size_t size = PMD_SIZE;
+	void *ret = NULL, *kaddr;
+	long length = 0;
+	pgoff_t pgoff;
+	pfn_t pfn;
+	int id;
+
+	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
 		goto fallback;
-	if (length < PMD_SIZE)
-		goto unmap_fallback;
-	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
-		goto unmap_fallback;
-	if (!pfn_t_devmap(dax.pfn))
-		goto unmap_fallback;
-
-	dax_unmap_atomic(bdev, &dax);
-	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
+
+	id = dax_read_lock();
+	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+	if (length < 0)
+		goto unlock_fallback;
+	length = PFN_PHYS(length);
+
+	if (length < size)
+		goto unlock_fallback;
+	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
+		goto unlock_fallback;
+	if (!pfn_t_devmap(pfn))
+		goto unlock_fallback;
+	dax_read_unlock(id);
+
+	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
 			RADIX_DAX_PMD);
 	if (IS_ERR(ret))
 		goto fallback;
 	*entryp = ret;
 
-	trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
+	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
 	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
-			dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
+			pfn, vmf->flags & FAULT_FLAG_WRITE);
 
- unmap_fallback:
-	dax_unmap_atomic(bdev, &dax);
+unlock_fallback:
+	dax_read_unlock(id);
 fallback:
-	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
-			dax.pfn, ret);
+	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
 	return VM_FAULT_FALLBACK;
 }
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 3a38c1b84e3c..26d77f9f8c12 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -799,6 +799,7 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
 static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 		unsigned flags, struct iomap *iomap)
 {
+	struct block_device *bdev;
 	unsigned int blkbits = inode->i_blkbits;
 	unsigned long first_block = offset >> blkbits;
 	unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
@@ -812,8 +813,13 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 		return ret;
 
 	iomap->flags = 0;
-	iomap->bdev = inode->i_sb->s_bdev;
+	bdev = inode->i_sb->s_bdev;
+	iomap->bdev = bdev;
 	iomap->offset = (u64)first_block << blkbits;
+	if (blk_queue_dax(bdev->bd_queue))
+		iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	else
+		iomap->dax_dev = NULL;
 
 	if (ret == 0) {
 		iomap->type = IOMAP_HOLE;
@@ -835,6 +841,7 @@ static int
 ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
 		ssize_t written, unsigned flags, struct iomap *iomap)
 {
+	put_dax(iomap->dax_dev);
 	if (iomap->type == IOMAP_MAPPED &&
 	    written < length &&
 	    (flags & IOMAP_WRITE))
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 10b574ab354b..f0729b0705c7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3305,6 +3305,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 			    unsigned flags, struct iomap *iomap)
 {
+	struct block_device *bdev;
 	unsigned int blkbits = inode->i_blkbits;
 	unsigned long first_block = offset >> blkbits;
 	unsigned long last_block = (offset + length - 1) >> blkbits;
@@ -3373,7 +3374,12 @@ retry:
 	}
 
 	iomap->flags = 0;
-	iomap->bdev = inode->i_sb->s_bdev;
+	bdev = inode->i_sb->s_bdev;
+	iomap->bdev = bdev;
+	if (blk_queue_dax(bdev->bd_queue))
+		iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	else
+		iomap->dax_dev = NULL;
 	iomap->offset = first_block << blkbits;
 
 	if (ret == 0) {
@@ -3406,6 +3412,7 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
 	int blkbits = inode->i_blkbits;
 	bool truncate = false;
 
+	put_dax(iomap->dax_dev);
 	if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
 		return 0;
diff --git a/fs/iomap.c b/fs/iomap.c
index 1c25ae30500e..4add7d4ad006 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -360,7 +360,8 @@ static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
 	sector_t sector = iomap->blkno +
 		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
 
-	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
+	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
+			offset, bytes);
 }
 
 static loff_t
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 288ee5b840d7..4b47403f8089 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -976,6 +976,7 @@ xfs_file_iomap_begin(
 	int			nimaps = 1, error = 0;
 	bool			shared = false, trimmed = false;
 	unsigned		lockmode;
+	struct block_device	*bdev;
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
@@ -1063,6 +1064,14 @@ xfs_file_iomap_begin(
 	}
 
 	xfs_bmbt_to_iomap(ip, iomap, &imap);
+
+	/* optionally associate a dax device with the iomap bdev */
+	bdev = iomap->bdev;
+	if (blk_queue_dax(bdev->bd_queue))
+		iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	else
+		iomap->dax_dev = NULL;
+
 	if (shared)
 		iomap->flags |= IOMAP_F_SHARED;
 	return 0;
@@ -1140,6 +1149,7 @@ xfs_file_iomap_end(
 	unsigned		flags,
 	struct iomap		*iomap)
 {
+	put_dax(iomap->dax_dev);
 	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
 		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
 				length, written, iomap);