Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig        |   26
-rw-r--r--  block/Makefile       |    5
-rw-r--r--  block/bdev.c         | 1058
-rw-r--r--  block/bfq-iosched.c  |   16
-rw-r--r--  block/bio.c          |    2
-rw-r--r--  block/blk-map.c      |    2
-rw-r--r--  block/blk-mq.c       |   16
-rw-r--r--  block/blk-throttle.c |    1
-rw-r--r--  block/blk.h          |    2
-rw-r--r--  block/bsg-lib.c      |   90
-rw-r--r--  block/bsg.c          |  463
-rw-r--r--  block/fops.c         |  640
-rw-r--r--  block/genhd.c        |    9
-rw-r--r--  block/mq-deadline.c  |   12
-rw-r--r--  block/scsi_ioctl.c   |  890
15 files changed, 1892 insertions, 1340 deletions
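The largest addition below, block/bdev.c, carries the block-device open/claim/release machinery moved out of fs/block_dev.c. As a minimal sketch of the exclusive-open pattern it exports — using only signatures that appear in this diff; the function name and the use of the function as holder cookie are illustrative, not from the patch — a caller might do:

#include <linux/blkdev.h>
#include <linux/err.h>

/* Illustrative only: exclusive open and release of a block device.
 * FMODE_EXCL requires a non-NULL holder; a pointer unique to the
 * claimant (here the function itself) serves as the holder cookie. */
static int example_claim_bdev(const char *path)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, mode, example_claim_bdev);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... submit I/O against bdev ... */

	blkdev_put(bdev, mode);
	return 0;
}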
diff --git a/block/Kconfig b/block/Kconfig index bac87d773c54..8e28ae7718bd 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -29,35 +29,15 @@ if BLOCK config BLK_RQ_ALLOC_TIME bool -config BLK_SCSI_REQUEST - bool - config BLK_CGROUP_RWSTAT bool -config BLK_DEV_BSG - bool "Block layer SG support v4" - default y - select BLK_SCSI_REQUEST - help - Saying Y here will enable generic SG (SCSI generic) v4 support - for any block device. - - Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4 - can handle complicated SCSI commands: tagged variable length cdbs - with bidirectional data transfers and generic request/response - protocols (e.g. Task Management Functions and SMP in Serial - Attached SCSI). - - This option is required by recent UDEV versions to properly - access device serial numbers, etc. - - If unsure, say Y. +config BLK_DEV_BSG_COMMON + tristate config BLK_DEV_BSGLIB bool "Block layer SG support v4 helper lib" - select BLK_DEV_BSG - select BLK_SCSI_REQUEST + select BLK_DEV_BSG_COMMON help Subsystems will normally enable this if needed. Users will not normally need to manually enable this. diff --git a/block/Makefile b/block/Makefile index 1d0d466f2182..41aa1ba69c90 100644 --- a/block/Makefile +++ b/block/Makefile @@ -3,7 +3,7 @@ # Makefile for the kernel block layer # -obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \ +obj-$(CONFIG_BLOCK) := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-timeout.o \ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ @@ -12,8 +12,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \ disk-events.o obj-$(CONFIG_BOUNCE) += bounce.o -obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o -obj-$(CONFIG_BLK_DEV_BSG) += bsg.o +obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_BLK_CGROUP_RWSTAT) += blk-cgroup-rwstat.o diff --git a/block/bdev.c b/block/bdev.c new file mode 100644 index 000000000000..cf2780cb44a7 --- /dev/null +++ b/block/bdev.c @@ -0,0 +1,1058 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE + * Copyright (C) 2016 - 2020 Christoph Hellwig + */ + +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/kmod.h> +#include <linux/major.h> +#include <linux/device_cgroup.h> +#include <linux/blkdev.h> +#include <linux/backing-dev.h> +#include <linux/module.h> +#include <linux/blkpg.h> +#include <linux/magic.h> +#include <linux/buffer_head.h> +#include <linux/swap.h> +#include <linux/writeback.h> +#include <linux/mount.h> +#include <linux/pseudo_fs.h> +#include <linux/uio.h> +#include <linux/namei.h> +#include <linux/cleancache.h> +#include <linux/part_stat.h> +#include <linux/uaccess.h> +#include "../fs/internal.h" +#include "blk.h" + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +static inline struct bdev_inode *BDEV_I(struct inode *inode) +{ + return container_of(inode, struct bdev_inode, vfs_inode); +} + +struct block_device *I_BDEV(struct inode *inode) +{ + return &BDEV_I(inode)->bdev; +} +EXPORT_SYMBOL(I_BDEV); + +static void bdev_write_inode(struct block_device *bdev) +{ + struct inode *inode = bdev->bd_inode; + int ret; + + spin_lock(&inode->i_lock); + while (inode->i_state & I_DIRTY) { + spin_unlock(&inode->i_lock); + ret = write_inode_now(inode, true); + if (ret) { + char 
name[BDEVNAME_SIZE]; + pr_warn_ratelimited("VFS: Dirty inode writeback failed " + "for block device %s (err=%d).\n", + bdevname(bdev, name), ret); + } + spin_lock(&inode->i_lock); + } + spin_unlock(&inode->i_lock); +} + +/* Kill _all_ buffers and pagecache , dirty or not.. */ +static void kill_bdev(struct block_device *bdev) +{ + struct address_space *mapping = bdev->bd_inode->i_mapping; + + if (mapping_empty(mapping)) + return; + + invalidate_bh_lrus(); + truncate_inode_pages(mapping, 0); +} + +/* Invalidate clean unused buffers and pagecache. */ +void invalidate_bdev(struct block_device *bdev) +{ + struct address_space *mapping = bdev->bd_inode->i_mapping; + + if (mapping->nrpages) { + invalidate_bh_lrus(); + lru_add_drain_all(); /* make sure all lru add caches are flushed */ + invalidate_mapping_pages(mapping, 0, -1); + } + /* 99% of the time, we don't need to flush the cleancache on the bdev. + * But, for the strange corners, lets be cautious + */ + cleancache_invalidate_inode(mapping); +} +EXPORT_SYMBOL(invalidate_bdev); + +/* + * Drop all buffers & page cache for given bdev range. This function bails + * with error if bdev has other exclusive owner (such as filesystem). + */ +int truncate_bdev_range(struct block_device *bdev, fmode_t mode, + loff_t lstart, loff_t lend) +{ + /* + * If we don't hold exclusive handle for the device, upgrade to it + * while we discard the buffer cache to avoid discarding buffers + * under live filesystem. + */ + if (!(mode & FMODE_EXCL)) { + int err = bd_prepare_to_claim(bdev, truncate_bdev_range); + if (err) + goto invalidate; + } + + truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); + if (!(mode & FMODE_EXCL)) + bd_abort_claiming(bdev, truncate_bdev_range); + return 0; + +invalidate: + /* + * Someone else has handle exclusively open. Try invalidating instead. + * The 'end' argument is inclusive so the rounding is safe. 
+ */ + return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, + lstart >> PAGE_SHIFT, + lend >> PAGE_SHIFT); +} + +static void set_init_blocksize(struct block_device *bdev) +{ + unsigned int bsize = bdev_logical_block_size(bdev); + loff_t size = i_size_read(bdev->bd_inode); + + while (bsize < PAGE_SIZE) { + if (size & bsize) + break; + bsize <<= 1; + } + bdev->bd_inode->i_blkbits = blksize_bits(bsize); +} + +int set_blocksize(struct block_device *bdev, int size) +{ + /* Size must be a power of two, and between 512 and PAGE_SIZE */ + if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) + return -EINVAL; + + /* Size cannot be smaller than the size supported by the device */ + if (size < bdev_logical_block_size(bdev)) + return -EINVAL; + + /* Don't change the size if it is same as current */ + if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { + sync_blockdev(bdev); + bdev->bd_inode->i_blkbits = blksize_bits(size); + kill_bdev(bdev); + } + return 0; +} + +EXPORT_SYMBOL(set_blocksize); + +int sb_set_blocksize(struct super_block *sb, int size) +{ + if (set_blocksize(sb->s_bdev, size)) + return 0; + /* If we get here, we know size is power of two + * and it's value is between 512 and PAGE_SIZE */ + sb->s_blocksize = size; + sb->s_blocksize_bits = blksize_bits(size); + return sb->s_blocksize; +} + +EXPORT_SYMBOL(sb_set_blocksize); + +int sb_min_blocksize(struct super_block *sb, int size) +{ + int minsize = bdev_logical_block_size(sb->s_bdev); + if (size < minsize) + size = minsize; + return sb_set_blocksize(sb, size); +} + +EXPORT_SYMBOL(sb_min_blocksize); + +int __sync_blockdev(struct block_device *bdev, int wait) +{ + if (!bdev) + return 0; + if (!wait) + return filemap_flush(bdev->bd_inode->i_mapping); + return filemap_write_and_wait(bdev->bd_inode->i_mapping); +} + +/* + * Write out and wait upon all the dirty data associated with a block + * device via its mapping. Does not take the superblock lock. + */ +int sync_blockdev(struct block_device *bdev) +{ + return __sync_blockdev(bdev, 1); +} +EXPORT_SYMBOL(sync_blockdev); + +/* + * Write out and wait upon all dirty data associated with this + * device. Filesystem data as well as the underlying block + * device. Takes the superblock lock. + */ +int fsync_bdev(struct block_device *bdev) +{ + struct super_block *sb = get_super(bdev); + if (sb) { + int res = sync_filesystem(sb); + drop_super(sb); + return res; + } + return sync_blockdev(bdev); +} +EXPORT_SYMBOL(fsync_bdev); + +/** + * freeze_bdev -- lock a filesystem and force it into a consistent state + * @bdev: blockdevice to lock + * + * If a superblock is found on this device, we take the s_umount semaphore + * on it to make sure nobody unmounts until the snapshot creation is done. + * The reference counter (bd_fsfreeze_count) guarantees that only the last + * unfreeze process can unfreeze the frozen filesystem actually when multiple + * freeze requests arrive simultaneously. It counts up in freeze_bdev() and + * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze + * actually. 
+ */ +int freeze_bdev(struct block_device *bdev) +{ + struct super_block *sb; + int error = 0; + + mutex_lock(&bdev->bd_fsfreeze_mutex); + if (++bdev->bd_fsfreeze_count > 1) + goto done; + + sb = get_active_super(bdev); + if (!sb) + goto sync; + if (sb->s_op->freeze_super) + error = sb->s_op->freeze_super(sb); + else + error = freeze_super(sb); + deactivate_super(sb); + + if (error) { + bdev->bd_fsfreeze_count--; + goto done; + } + bdev->bd_fsfreeze_sb = sb; + +sync: + sync_blockdev(bdev); +done: + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return error; +} +EXPORT_SYMBOL(freeze_bdev); + +/** + * thaw_bdev -- unlock filesystem + * @bdev: blockdevice to unlock + * + * Unlocks the filesystem and marks it writeable again after freeze_bdev(). + */ +int thaw_bdev(struct block_device *bdev) +{ + struct super_block *sb; + int error = -EINVAL; + + mutex_lock(&bdev->bd_fsfreeze_mutex); + if (!bdev->bd_fsfreeze_count) + goto out; + + error = 0; + if (--bdev->bd_fsfreeze_count > 0) + goto out; + + sb = bdev->bd_fsfreeze_sb; + if (!sb) + goto out; + + if (sb->s_op->thaw_super) + error = sb->s_op->thaw_super(sb); + else + error = thaw_super(sb); + if (error) + bdev->bd_fsfreeze_count++; + else + bdev->bd_fsfreeze_sb = NULL; +out: + mutex_unlock(&bdev->bd_fsfreeze_mutex); + return error; +} +EXPORT_SYMBOL(thaw_bdev); + +/** + * bdev_read_page() - Start reading a page from a block device + * @bdev: The device to read the page from + * @sector: The offset on the device to read the page to (need not be aligned) + * @page: The page to read + * + * On entry, the page should be locked. It will be unlocked when the page + * has been read. If the block driver implements rw_page synchronously, + * that will be true on exit from this function, but it need not be. + * + * Errors returned by this function are usually "soft", eg out of memory, or + * queue full; callers should try a different route to read this page rather + * than propagate an error back up the stack. + * + * Return: negative errno if an error occurs, 0 if submission was successful. + */ +int bdev_read_page(struct block_device *bdev, sector_t sector, + struct page *page) +{ + const struct block_device_operations *ops = bdev->bd_disk->fops; + int result = -EOPNOTSUPP; + + if (!ops->rw_page || bdev_get_integrity(bdev)) + return result; + + result = blk_queue_enter(bdev->bd_disk->queue, 0); + if (result) + return result; + result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, + REQ_OP_READ); + blk_queue_exit(bdev->bd_disk->queue); + return result; +} + +/** + * bdev_write_page() - Start writing a page to a block device + * @bdev: The device to write the page to + * @sector: The offset on the device to write the page to (need not be aligned) + * @page: The page to write + * @wbc: The writeback_control for the write + * + * On entry, the page should be locked and not currently under writeback. + * On exit, if the write started successfully, the page will be unlocked and + * under writeback. If the write failed already (eg the driver failed to + * queue the page to the device), the page will still be locked. If the + * caller is a ->writepage implementation, it will need to unlock the page. + * + * Errors returned by this function are usually "soft", eg out of memory, or + * queue full; callers should try a different route to write this page rather + * than propagate an error back up the stack. + * + * Return: negative errno if an error occurs, 0 if submission was successful. 
+ */ +int bdev_write_page(struct block_device *bdev, sector_t sector, + struct page *page, struct writeback_control *wbc) +{ + int result; + const struct block_device_operations *ops = bdev->bd_disk->fops; + + if (!ops->rw_page || bdev_get_integrity(bdev)) + return -EOPNOTSUPP; + result = blk_queue_enter(bdev->bd_disk->queue, 0); + if (result) + return result; + + set_page_writeback(page); + result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, + REQ_OP_WRITE); + if (result) { + end_page_writeback(page); + } else { + clean_page_buffers(page); + unlock_page(page); + } + blk_queue_exit(bdev->bd_disk->queue); + return result; +} + +/* + * pseudo-fs + */ + +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); +static struct kmem_cache * bdev_cachep __read_mostly; + +static struct inode *bdev_alloc_inode(struct super_block *sb) +{ + struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); + + if (!ei) + return NULL; + memset(&ei->bdev, 0, sizeof(ei->bdev)); + return &ei->vfs_inode; +} + +static void bdev_free_inode(struct inode *inode) +{ + struct block_device *bdev = I_BDEV(inode); + + free_percpu(bdev->bd_stats); + kfree(bdev->bd_meta_info); + + if (!bdev_is_partition(bdev)) { + if (bdev->bd_disk && bdev->bd_disk->bdi) + bdi_put(bdev->bd_disk->bdi); + kfree(bdev->bd_disk); + } + + if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) + blk_free_ext_minor(MINOR(bdev->bd_dev)); + + kmem_cache_free(bdev_cachep, BDEV_I(inode)); +} + +static void init_once(void *data) +{ + struct bdev_inode *ei = data; + + inode_init_once(&ei->vfs_inode); +} + +static void bdev_evict_inode(struct inode *inode) +{ + truncate_inode_pages_final(&inode->i_data); + invalidate_inode_buffers(inode); /* is it needed here? */ + clear_inode(inode); +} + +static const struct super_operations bdev_sops = { + .statfs = simple_statfs, + .alloc_inode = bdev_alloc_inode, + .free_inode = bdev_free_inode, + .drop_inode = generic_delete_inode, + .evict_inode = bdev_evict_inode, +}; + +static int bd_init_fs_context(struct fs_context *fc) +{ + struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC); + if (!ctx) + return -ENOMEM; + fc->s_iflags |= SB_I_CGROUPWB; + ctx->ops = &bdev_sops; + return 0; +} + +static struct file_system_type bd_type = { + .name = "bdev", + .init_fs_context = bd_init_fs_context, + .kill_sb = kill_anon_super, +}; + +struct super_block *blockdev_superblock __read_mostly; +EXPORT_SYMBOL_GPL(blockdev_superblock); + +void __init bdev_cache_init(void) +{ + int err; + static struct vfsmount *bd_mnt; + + bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), + 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC), + init_once); + err = register_filesystem(&bd_type); + if (err) + panic("Cannot register bdev pseudo-fs"); + bd_mnt = kern_mount(&bd_type); + if (IS_ERR(bd_mnt)) + panic("Cannot create bdev pseudo-fs"); + blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ +} + +struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) +{ + struct block_device *bdev; + struct inode *inode; + + inode = new_inode(blockdev_superblock); + if (!inode) + return NULL; + inode->i_mode = S_IFBLK; + inode->i_rdev = 0; + inode->i_data.a_ops = &def_blk_aops; + mapping_set_gfp_mask(&inode->i_data, GFP_USER); + + bdev = I_BDEV(inode); + mutex_init(&bdev->bd_fsfreeze_mutex); + spin_lock_init(&bdev->bd_size_lock); + bdev->bd_disk = disk; + bdev->bd_partno = partno; + bdev->bd_inode = inode; + bdev->bd_stats = alloc_percpu(struct disk_stats); + if 
(!bdev->bd_stats) { + iput(inode); + return NULL; + } + return bdev; +} + +void bdev_add(struct block_device *bdev, dev_t dev) +{ + bdev->bd_dev = dev; + bdev->bd_inode->i_rdev = dev; + bdev->bd_inode->i_ino = dev; + insert_inode_hash(bdev->bd_inode); +} + +long nr_blockdev_pages(void) +{ + struct inode *inode; + long ret = 0; + + spin_lock(&blockdev_superblock->s_inode_list_lock); + list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) + ret += inode->i_mapping->nrpages; + spin_unlock(&blockdev_superblock->s_inode_list_lock); + + return ret; +} + +/** + * bd_may_claim - test whether a block device can be claimed + * @bdev: block device of interest + * @whole: whole block device containing @bdev, may equal @bdev + * @holder: holder trying to claim @bdev + * + * Test whether @bdev can be claimed by @holder. + * + * CONTEXT: + * spin_lock(&bdev_lock). + * + * RETURNS: + * %true if @bdev can be claimed, %false otherwise. + */ +static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, + void *holder) +{ + if (bdev->bd_holder == holder) + return true; /* already a holder */ + else if (bdev->bd_holder != NULL) + return false; /* held by someone else */ + else if (whole == bdev) + return true; /* is a whole device which isn't held */ + + else if (whole->bd_holder == bd_may_claim) + return true; /* is a partition of a device that is being partitioned */ + else if (whole->bd_holder != NULL) + return false; /* is a partition of a held device */ + else + return true; /* is a partition of an un-held device */ +} + +/** + * bd_prepare_to_claim - claim a block device + * @bdev: block device of interest + * @holder: holder trying to claim @bdev + * + * Claim @bdev. This function fails if @bdev is already claimed by another + * holder and waits if another claiming is in progress. return, the caller + * has ownership of bd_claiming and bd_holder[s]. + * + * RETURNS: + * 0 if @bdev can be claimed, -EBUSY otherwise. + */ +int bd_prepare_to_claim(struct block_device *bdev, void *holder) +{ + struct block_device *whole = bdev_whole(bdev); + + if (WARN_ON_ONCE(!holder)) + return -EINVAL; +retry: + spin_lock(&bdev_lock); + /* if someone else claimed, fail */ + if (!bd_may_claim(bdev, whole, holder)) { + spin_unlock(&bdev_lock); + return -EBUSY; + } + + /* if claiming is already in progress, wait for it to finish */ + if (whole->bd_claiming) { + wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); + DEFINE_WAIT(wait); + + prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&bdev_lock); + schedule(); + finish_wait(wq, &wait); + goto retry; + } + + /* yay, all mine */ + whole->bd_claiming = holder; + spin_unlock(&bdev_lock); + return 0; +} +EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */ + +static void bd_clear_claiming(struct block_device *whole, void *holder) +{ + lockdep_assert_held(&bdev_lock); + /* tell others that we're done */ + BUG_ON(whole->bd_claiming != holder); + whole->bd_claiming = NULL; + wake_up_bit(&whole->bd_claiming, 0); +} + +/** + * bd_finish_claiming - finish claiming of a block device + * @bdev: block device of interest + * @holder: holder that has claimed @bdev + * + * Finish exclusive open of a block device. Mark the device as exlusively + * open by the holder and wake up all waiters for exclusive open to finish. 
+ */ +static void bd_finish_claiming(struct block_device *bdev, void *holder) +{ + struct block_device *whole = bdev_whole(bdev); + + spin_lock(&bdev_lock); + BUG_ON(!bd_may_claim(bdev, whole, holder)); + /* + * Note that for a whole device bd_holders will be incremented twice, + * and bd_holder will be set to bd_may_claim before being set to holder + */ + whole->bd_holders++; + whole->bd_holder = bd_may_claim; + bdev->bd_holders++; + bdev->bd_holder = holder; + bd_clear_claiming(whole, holder); + spin_unlock(&bdev_lock); +} + +/** + * bd_abort_claiming - abort claiming of a block device + * @bdev: block device of interest + * @holder: holder that has claimed @bdev + * + * Abort claiming of a block device when the exclusive open failed. This can be + * also used when exclusive open is not actually desired and we just needed + * to block other exclusive openers for a while. + */ +void bd_abort_claiming(struct block_device *bdev, void *holder) +{ + spin_lock(&bdev_lock); + bd_clear_claiming(bdev_whole(bdev), holder); + spin_unlock(&bdev_lock); +} +EXPORT_SYMBOL(bd_abort_claiming); + +static void blkdev_flush_mapping(struct block_device *bdev) +{ + WARN_ON_ONCE(bdev->bd_holders); + sync_blockdev(bdev); + kill_bdev(bdev); + bdev_write_inode(bdev); +} + +static int blkdev_get_whole(struct block_device *bdev, fmode_t mode) +{ + struct gendisk *disk = bdev->bd_disk; + int ret = 0; + + if (disk->fops->open) { + ret = disk->fops->open(bdev, mode); + if (ret) { + /* avoid ghost partitions on a removed medium */ + if (ret == -ENOMEDIUM && + test_bit(GD_NEED_PART_SCAN, &disk->state)) + bdev_disk_changed(disk, true); + return ret; + } + } + + if (!bdev->bd_openers) + set_init_blocksize(bdev); + if (test_bit(GD_NEED_PART_SCAN, &disk->state)) + bdev_disk_changed(disk, false); + bdev->bd_openers++; + return 0;; +} + +static void blkdev_put_whole(struct block_device *bdev, fmode_t mode) +{ + if (!--bdev->bd_openers) + blkdev_flush_mapping(bdev); + if (bdev->bd_disk->fops->release) + bdev->bd_disk->fops->release(bdev->bd_disk, mode); +} + +static int blkdev_get_part(struct block_device *part, fmode_t mode) +{ + struct gendisk *disk = part->bd_disk; + int ret; + + if (part->bd_openers) + goto done; + + ret = blkdev_get_whole(bdev_whole(part), mode); + if (ret) + return ret; + + ret = -ENXIO; + if (!bdev_nr_sectors(part)) + goto out_blkdev_put; + + disk->open_partitions++; + set_init_blocksize(part); +done: + part->bd_openers++; + return 0; + +out_blkdev_put: + blkdev_put_whole(bdev_whole(part), mode); + return ret; +} + +static void blkdev_put_part(struct block_device *part, fmode_t mode) +{ + struct block_device *whole = bdev_whole(part); + + if (--part->bd_openers) + return; + blkdev_flush_mapping(part); + whole->bd_disk->open_partitions--; + blkdev_put_whole(whole, mode); +} + +struct block_device *blkdev_get_no_open(dev_t dev) +{ + struct block_device *bdev; + struct inode *inode; + + inode = ilookup(blockdev_superblock, dev); + if (!inode) { + blk_request_module(dev); + inode = ilookup(blockdev_superblock, dev); + if (!inode) + return NULL; + } + + /* switch from the inode reference to a device mode one: */ + bdev = &BDEV_I(inode)->bdev; + if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) + bdev = NULL; + iput(inode); + + if (!bdev) + return NULL; + if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) || + !try_module_get(bdev->bd_disk->fops->owner)) { + put_device(&bdev->bd_device); + return NULL; + } + + return bdev; +} + +void blkdev_put_no_open(struct block_device *bdev) +{ + 
module_put(bdev->bd_disk->fops->owner); + put_device(&bdev->bd_device); +} + +/** + * blkdev_get_by_dev - open a block device by device number + * @dev: device number of block device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open the block device described by device number @dev. If @mode includes + * %FMODE_EXCL, the block device is opened with exclusive access. Specifying + * %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may nest for + * the same @holder. + * + * Use this interface ONLY if you really do not have anything better - i.e. when + * you are behind a truly sucky interface and all you are given is a device + * number. Everything else should use blkdev_get_by_path(). + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * Reference to the block_device on success, ERR_PTR(-errno) on failure. + */ +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) +{ + bool unblock_events = true; + struct block_device *bdev; + struct gendisk *disk; + int ret; + + ret = devcgroup_check_permission(DEVCG_DEV_BLOCK, + MAJOR(dev), MINOR(dev), + ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) | + ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0)); + if (ret) + return ERR_PTR(ret); + + bdev = blkdev_get_no_open(dev); + if (!bdev) + return ERR_PTR(-ENXIO); + disk = bdev->bd_disk; + + if (mode & FMODE_EXCL) { + ret = bd_prepare_to_claim(bdev, holder); + if (ret) + goto put_blkdev; + } + + disk_block_events(disk); + + mutex_lock(&disk->open_mutex); + ret = -ENXIO; + if (!disk_live(disk)) + goto abort_claiming; + if (bdev_is_partition(bdev)) + ret = blkdev_get_part(bdev, mode); + else + ret = blkdev_get_whole(bdev, mode); + if (ret) + goto abort_claiming; + if (mode & FMODE_EXCL) { + bd_finish_claiming(bdev, holder); + + /* + * Block event polling for write claims if requested. Any write + * holder makes the write_holder state stick until all are + * released. This is good enough and tracking individual + * writeable reference is too fragile given the way @mode is + * used in blkdev_get/put(). + */ + if ((mode & FMODE_WRITE) && !bdev->bd_write_holder && + (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { + bdev->bd_write_holder = true; + unblock_events = false; + } + } + mutex_unlock(&disk->open_mutex); + + if (unblock_events) + disk_unblock_events(disk); + return bdev; + +abort_claiming: + if (mode & FMODE_EXCL) + bd_abort_claiming(bdev, holder); + mutex_unlock(&disk->open_mutex); + disk_unblock_events(disk); +put_blkdev: + blkdev_put_no_open(bdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(blkdev_get_by_dev); + +/** + * blkdev_get_by_path - open a block device by name + * @path: path to the block device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open the block device described by the device file at @path. If @mode + * includes %FMODE_EXCL, the block device is opened with exclusive access. + * Specifying %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may + * nest for the same @holder. + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * Reference to the block_device on success, ERR_PTR(-errno) on failure. 
+ */ +struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder) +{ + struct block_device *bdev; + dev_t dev; + int error; + + error = lookup_bdev(path, &dev); + if (error) + return ERR_PTR(error); + + bdev = blkdev_get_by_dev(dev, mode, holder); + if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { + blkdev_put(bdev, mode); + return ERR_PTR(-EACCES); + } + + return bdev; +} +EXPORT_SYMBOL(blkdev_get_by_path); + +void blkdev_put(struct block_device *bdev, fmode_t mode) +{ + struct gendisk *disk = bdev->bd_disk; + + /* + * Sync early if it looks like we're the last one. If someone else + * opens the block device between now and the decrement of bd_openers + * then we did a sync that we didn't need to, but that's not the end + * of the world and we want to avoid long (could be several minute) + * syncs while holding the mutex. + */ + if (bdev->bd_openers == 1) + sync_blockdev(bdev); + + mutex_lock(&disk->open_mutex); + if (mode & FMODE_EXCL) { + struct block_device *whole = bdev_whole(bdev); + bool bdev_free; + + /* + * Release a claim on the device. The holder fields + * are protected with bdev_lock. open_mutex is to + * synchronize disk_holder unlinking. + */ + spin_lock(&bdev_lock); + + WARN_ON_ONCE(--bdev->bd_holders < 0); + WARN_ON_ONCE(--whole->bd_holders < 0); + + if ((bdev_free = !bdev->bd_holders)) + bdev->bd_holder = NULL; + if (!whole->bd_holders) + whole->bd_holder = NULL; + + spin_unlock(&bdev_lock); + + /* + * If this was the last claim, remove holder link and + * unblock evpoll if it was a write holder. + */ + if (bdev_free && bdev->bd_write_holder) { + disk_unblock_events(disk); + bdev->bd_write_holder = false; + } + } + + /* + * Trigger event checking and tell drivers to flush MEDIA_CHANGE + * event. This is to ensure detection of media removal commanded + * from userland - e.g. eject(1). + */ + disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE); + + if (bdev_is_partition(bdev)) + blkdev_put_part(bdev, mode); + else + blkdev_put_whole(bdev, mode); + mutex_unlock(&disk->open_mutex); + + blkdev_put_no_open(bdev); +} +EXPORT_SYMBOL(blkdev_put); + +/** + * lookup_bdev - lookup a struct block_device by name + * @pathname: special file representing the block device + * @dev: return value of the block device's dev_t + * + * Get a reference to the blockdevice at @pathname in the current + * namespace if possible and return it. Return ERR_PTR(error) + * otherwise. + */ +int lookup_bdev(const char *pathname, dev_t *dev) +{ + struct inode *inode; + struct path path; + int error; + + if (!pathname || !*pathname) + return -EINVAL; + + error = kern_path(pathname, LOOKUP_FOLLOW, &path); + if (error) + return error; + + inode = d_backing_inode(path.dentry); + error = -ENOTBLK; + if (!S_ISBLK(inode->i_mode)) + goto out_path_put; + error = -EACCES; + if (!may_open_dev(&path)) + goto out_path_put; + + *dev = inode->i_rdev; + error = 0; +out_path_put: + path_put(&path); + return error; +} +EXPORT_SYMBOL(lookup_bdev); + +int __invalidate_device(struct block_device *bdev, bool kill_dirty) +{ + struct super_block *sb = get_super(bdev); + int res = 0; + + if (sb) { + /* + * no need to lock the super, get_super holds the + * read mutex so the filesystem cannot go away + * under us (->put_super runs with the write lock + * hold). 
+ */ + shrink_dcache_sb(sb); + res = invalidate_inodes(sb, kill_dirty); + drop_super(sb); + } + invalidate_bdev(bdev); + return res; +} +EXPORT_SYMBOL(__invalidate_device); + +void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) +{ + struct inode *inode, *old_inode = NULL; + + spin_lock(&blockdev_superblock->s_inode_list_lock); + list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { + struct address_space *mapping = inode->i_mapping; + struct block_device *bdev; + + spin_lock(&inode->i_lock); + if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || + mapping->nrpages == 0) { + spin_unlock(&inode->i_lock); + continue; + } + __iget(inode); + spin_unlock(&inode->i_lock); + spin_unlock(&blockdev_superblock->s_inode_list_lock); + /* + * We hold a reference to 'inode' so it couldn't have been + * removed from s_inodes list while we dropped the + * s_inode_list_lock We cannot iput the inode now as we can + * be holding the last reference and we cannot iput it under + * s_inode_list_lock. So we keep the reference and iput it + * later. + */ + iput(old_inode); + old_inode = inode; + bdev = I_BDEV(inode); + + mutex_lock(&bdev->bd_disk->open_mutex); + if (bdev->bd_openers) + func(bdev, arg); + mutex_unlock(&bdev->bd_disk->open_mutex); + + spin_lock(&blockdev_superblock->s_inode_list_lock); + } + spin_unlock(&blockdev_superblock->s_inode_list_lock); + iput(old_inode); +} diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 480e1a134859..dd13c2bbc29c 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2662,6 +2662,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) * are likely to increase the throughput. */ bfqq->new_bfqq = new_bfqq; + /* + * The above assignment schedules the following redirections: + * each time some I/O for bfqq arrives, the process that + * generated that I/O is disassociated from bfqq and + * associated with new_bfqq. Here we increases new_bfqq->ref + * in advance, adding the number of processes that are + * expected to be associated with new_bfqq as they happen to + * issue I/O. + */ new_bfqq->ref += process_refs; return new_bfqq; } @@ -2724,6 +2733,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, { struct bfq_queue *in_service_bfqq, *new_bfqq; + /* if a merge has already been setup, then proceed with that first */ + if (bfqq->new_bfqq) + return bfqq->new_bfqq; + /* * Check delayed stable merge for rotational or non-queueing * devs. 
For this branch to be executed, bfqq must not be @@ -2825,9 +2838,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfq_too_late_for_merging(bfqq)) return NULL; - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) return NULL; diff --git a/block/bio.c b/block/bio.c index e16849f46b0e..5df3dd282e40 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1688,7 +1688,7 @@ EXPORT_SYMBOL(bioset_init_from_src); /** * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb * @kiocb: kiocb describing the IO - * @nr_iovecs: number of iovecs to pre-allocate + * @nr_vecs: number of iovecs to pre-allocate * @bs: bio_set to allocate from * * Description: diff --git a/block/blk-map.c b/block/blk-map.c index d1448aaad980..4526adde0156 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -309,7 +309,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, static void bio_invalidate_vmalloc_pages(struct bio *bio) { -#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE +#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE if (bio->bi_private && !op_is_write(bio_op(bio))) { unsigned long i, len = 0; diff --git a/block/blk-mq.c b/block/blk-mq.c index 944049982e6e..108a352051be 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2135,6 +2135,18 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) } } +/* + * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple + * queues. This is important for md arrays to benefit from merging + * requests. + */ +static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) +{ + if (plug->multiple_queues) + return BLK_MAX_REQUEST_COUNT * 4; + return BLK_MAX_REQUEST_COUNT; +} + /** * blk_mq_submit_bio - Create and send a request to block device. * @bio: Bio pointer. @@ -2231,7 +2243,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) else last = list_entry_rq(plug->mq_list.prev); - if (request_count >= BLK_MAX_REQUEST_COUNT || (last && + if (request_count >= blk_plug_max_rq_count(plug) || (last && blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { blk_flush_plug_list(plug, false); trace_block_plug(q); @@ -3280,8 +3292,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, set->map[HCTX_TYPE_POLL].nr_queues) blk_queue_flag_set(QUEUE_FLAG_POLL, q); - q->sg_reserved_size = INT_MAX; - INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); INIT_LIST_HEAD(&q->requeue_list); spin_lock_init(&q->requeue_lock); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 55c49015e533..7c4e7993ba97 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2458,6 +2458,7 @@ int blk_throtl_init(struct request_queue *q) void blk_throtl_exit(struct request_queue *q) { BUG_ON(!q->td); + del_timer_sync(&q->td->service_queue.pending_timer); throtl_shutdown_wq(q); blkcg_deactivate_policy(q, &blkcg_policy_throtl); free_percpu(q->td->latency_buckets[READ]); diff --git a/block/blk.h b/block/blk.h index 8c96b0c90c48..7d2a0ba7ed21 100644 --- a/block/blk.h +++ b/block/blk.h @@ -373,4 +373,6 @@ static inline void bio_clear_hipri(struct bio *bio) bio->bi_opf &= ~REQ_HIPRI; } +extern const struct address_space_operations def_blk_aops; + #endif /* BLK_INTERNAL_H */ diff --git a/block/bsg-lib.c b/block/bsg-lib.c index a89d80102304..ccb98276c964 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -6,6 +6,7 @@ * Copyright (C) 2011 Red Hat, Inc. All rights reserved. 
* Copyright (C) 2011 Mike Christie */ +#include <linux/bsg.h> #include <linux/slab.h> #include <linux/blk-mq.h> #include <linux/delay.h> @@ -19,36 +20,44 @@ struct bsg_set { struct blk_mq_tag_set tag_set; + struct bsg_device *bd; bsg_job_fn *job_fn; bsg_timeout_fn *timeout_fn; }; -static int bsg_transport_check_proto(struct sg_io_v4 *hdr) +static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr, + fmode_t mode, unsigned int timeout) { + struct bsg_job *job; + struct request *rq; + struct bio *bio; + int ret; + if (hdr->protocol != BSG_PROTOCOL_SCSI || hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - return 0; -} -static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, - fmode_t mode) -{ - struct bsg_job *job = blk_mq_rq_to_pdu(rq); - int ret; + rq = blk_get_request(q, hdr->dout_xfer_len ? + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + rq->timeout = timeout; + job = blk_mq_rq_to_pdu(rq); job->request_len = hdr->request_len; job->request = memdup_user(uptr64(hdr->request), hdr->request_len); - if (IS_ERR(job->request)) - return PTR_ERR(job->request); + if (IS_ERR(job->request)) { + ret = PTR_ERR(job->request); + goto out_put_request; + } if (hdr->dout_xfer_len && hdr->din_xfer_len) { job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0); if (IS_ERR(job->bidi_rq)) { ret = PTR_ERR(job->bidi_rq); - goto out; + goto out_free_job_request; } ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, @@ -63,20 +72,20 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, job->bidi_bio = NULL; } - return 0; + ret = 0; + if (hdr->dout_xfer_len) { + ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp), + hdr->dout_xfer_len, GFP_KERNEL); + } else if (hdr->din_xfer_len) { + ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp), + hdr->din_xfer_len, GFP_KERNEL); + } -out_free_bidi_rq: - if (job->bidi_rq) - blk_put_request(job->bidi_rq); -out: - kfree(job->request); - return ret; -} + if (ret) + goto out_unmap_bidi_rq; -static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr) -{ - struct bsg_job *job = blk_mq_rq_to_pdu(rq); - int ret = 0; + bio = rq->bio; + blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); /* * The assignments below don't make much sense, but are kept for @@ -119,28 +128,20 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr) hdr->din_resid = 0; } - return ret; -} - -static void bsg_transport_free_rq(struct request *rq) -{ - struct bsg_job *job = blk_mq_rq_to_pdu(rq); - - if (job->bidi_rq) { + blk_rq_unmap_user(bio); +out_unmap_bidi_rq: + if (job->bidi_rq) blk_rq_unmap_user(job->bidi_bio); +out_free_bidi_rq: + if (job->bidi_rq) blk_put_request(job->bidi_rq); - } - +out_free_job_request: kfree(job->request); +out_put_request: + blk_put_request(rq); + return ret; } -static const struct bsg_ops bsg_transport_ops = { - .check_proto = bsg_transport_check_proto, - .fill_hdr = bsg_transport_fill_hdr, - .complete_rq = bsg_transport_complete_rq, - .free_rq = bsg_transport_free_rq, -}; - /** * bsg_teardown_job - routine to teardown a bsg job * @kref: kref inside bsg_job that is to be torn down @@ -327,7 +328,7 @@ void bsg_remove_queue(struct request_queue *q) struct bsg_set *bset = container_of(q->tag_set, struct bsg_set, tag_set); - bsg_unregister_queue(q); + bsg_unregister_queue(bset->bd); blk_cleanup_queue(q); blk_mq_free_tag_set(&bset->tag_set); kfree(bset); 
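The bsg-lib.c changes above collapse the old four-hook bsg_ops vtable (check_proto/fill_hdr/complete_rq/free_rq) into the single bsg_transport_sg_io_fn() handler passed at registration time. A hedged sketch of how a transport plugs into the reworked interface follows; the names are hypothetical, and the bsg_sg_io_fn type is assumed to match the handler signature used above:

#include <linux/bsg.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical transport: one callback now builds, executes and
 * completes the SG_IO v4 request in place of the old bsg_ops hooks. */
static int example_sg_io(struct request_queue *q, struct sg_io_v4 *hdr,
			 fmode_t mode, unsigned int timeout)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI)
		return -EINVAL;
	/* map user buffers, blk_execute_rq(), fill hdr result fields ... */
	return 0;
}

static struct bsg_device *example_bd;

static int example_attach(struct request_queue *q, struct device *parent)
{
	example_bd = bsg_register_queue(q, parent, dev_name(parent),
					example_sg_io);
	return PTR_ERR_OR_ZERO(example_bd);
}

static void example_detach(void)
{
	bsg_unregister_queue(example_bd);
}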
@@ -396,10 +397,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, q->queuedata = dev; blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); - ret = bsg_register_queue(q, dev, name, &bsg_transport_ops); - if (ret) { - printk(KERN_ERR "%s: bsg interface failed to " - "initialize - register queue\n", dev->kobj.name); + bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn); + if (IS_ERR(bset->bd)) { + ret = PTR_ERR(bset->bd); goto out_cleanup_queue; } diff --git a/block/bsg.c b/block/bsg.c index 1f196563ae6c..351095193788 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -15,343 +15,97 @@ #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_driver.h> #include <scsi/sg.h> #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" #define BSG_VERSION "0.4" -#define bsg_dbg(bd, fmt, ...) \ - pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__) - struct bsg_device { struct request_queue *queue; - spinlock_t lock; - struct hlist_node dev_list; - refcount_t ref_count; - char name[20]; + struct device device; + struct cdev cdev; int max_queue; + unsigned int timeout; + unsigned int reserved_size; + bsg_sg_io_fn *sg_io_fn; }; +static inline struct bsg_device *to_bsg_device(struct inode *inode) +{ + return container_of(inode->i_cdev, struct bsg_device, cdev); +} + #define BSG_DEFAULT_CMDS 64 #define BSG_MAX_DEVS 32768 -static DEFINE_MUTEX(bsg_mutex); -static DEFINE_IDR(bsg_minor_idr); - -#define BSG_LIST_ARRAY_SIZE 8 -static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; - +static DEFINE_IDA(bsg_minor_ida); static struct class *bsg_class; static int bsg_major; -static inline struct hlist_head *bsg_dev_idx_hash(int index) -{ - return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; -} - -#define uptr64(val) ((void __user *)(uintptr_t)(val)) - -static int bsg_scsi_check_proto(struct sg_io_v4 *hdr) -{ - if (hdr->protocol != BSG_PROTOCOL_SCSI || - hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD) - return -EINVAL; - return 0; -} - -static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, - fmode_t mode) -{ - struct scsi_request *sreq = scsi_req(rq); - - if (hdr->dout_xfer_len && hdr->din_xfer_len) { - pr_warn_once("BIDI support in bsg has been removed.\n"); - return -EOPNOTSUPP; - } - - sreq->cmd_len = hdr->request_len; - if (sreq->cmd_len > BLK_MAX_CDB) { - sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL); - if (!sreq->cmd) - return -ENOMEM; - } - - if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len)) - return -EFAULT; - if (blk_verify_command(sreq->cmd, mode)) - return -EPERM; - return 0; -} - -static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr) +static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr) { - struct scsi_request *sreq = scsi_req(rq); - int ret = 0; - - /* - * fill in all the output members - */ - hdr->device_status = sreq->result & 0xff; - hdr->transport_status = host_byte(sreq->result); - hdr->driver_status = 0; - if (scsi_status_is_check_condition(sreq->result)) - hdr->driver_status = DRIVER_SENSE; - hdr->info = 0; - if (hdr->device_status || hdr->transport_status || hdr->driver_status) - hdr->info |= SG_INFO_CHECK; - hdr->response_len = 0; - - if (sreq->sense_len && hdr->response) { - int len = min_t(unsigned int, hdr->max_response_len, - sreq->sense_len); - - if (copy_to_user(uptr64(hdr->response), sreq->sense, len)) - ret = -EFAULT; - else - hdr->response_len = len; - } + unsigned int timeout = 
BLK_DEFAULT_SG_TIMEOUT; - if (rq_data_dir(rq) == READ) - hdr->din_resid = sreq->resid_len; - else - hdr->dout_resid = sreq->resid_len; + if (hdr->timeout) + timeout = msecs_to_jiffies(hdr->timeout); + else if (bd->timeout) + timeout = bd->timeout; - return ret; + return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT); } -static void bsg_scsi_free_rq(struct request *rq) +static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg) { - scsi_req_free_cmd(scsi_req(rq)); -} - -static const struct bsg_ops bsg_scsi_ops = { - .check_proto = bsg_scsi_check_proto, - .fill_hdr = bsg_scsi_fill_hdr, - .complete_rq = bsg_scsi_complete_rq, - .free_rq = bsg_scsi_free_rq, -}; - -static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg) -{ - struct request *rq; - struct bio *bio; struct sg_io_v4 hdr; int ret; if (copy_from_user(&hdr, uarg, sizeof(hdr))) return -EFAULT; - - if (!q->bsg_dev.class_dev) - return -ENXIO; - if (hdr.guard != 'Q') return -EINVAL; - ret = q->bsg_dev.ops->check_proto(&hdr); - if (ret) - return ret; - - rq = blk_get_request(q, hdr.dout_xfer_len ? - REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode); - if (ret) { - blk_put_request(rq); - return ret; - } - - rq->timeout = msecs_to_jiffies(hdr.timeout); - if (!rq->timeout) - rq->timeout = q->sg_timeout; - if (!rq->timeout) - rq->timeout = BLK_DEFAULT_SG_TIMEOUT; - if (rq->timeout < BLK_MIN_SG_TIMEOUT) - rq->timeout = BLK_MIN_SG_TIMEOUT; - - if (hdr.dout_xfer_len) { - ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp), - hdr.dout_xfer_len, GFP_KERNEL); - } else if (hdr.din_xfer_len) { - ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp), - hdr.din_xfer_len, GFP_KERNEL); - } - - if (ret) - goto out_free_rq; - - bio = rq->bio; - - blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL)); - ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr); - blk_rq_unmap_user(bio); - -out_free_rq: - rq->q->bsg_dev.ops->free_rq(rq); - blk_put_request(rq); + ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr)); if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr))) return -EFAULT; return ret; } -static struct bsg_device *bsg_alloc_device(void) -{ - struct bsg_device *bd; - - bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); - if (unlikely(!bd)) - return NULL; - - spin_lock_init(&bd->lock); - bd->max_queue = BSG_DEFAULT_CMDS; - INIT_HLIST_NODE(&bd->dev_list); - return bd; -} - -static int bsg_put_device(struct bsg_device *bd) -{ - struct request_queue *q = bd->queue; - - mutex_lock(&bsg_mutex); - - if (!refcount_dec_and_test(&bd->ref_count)) { - mutex_unlock(&bsg_mutex); - return 0; - } - - hlist_del(&bd->dev_list); - mutex_unlock(&bsg_mutex); - - bsg_dbg(bd, "tearing down\n"); - - /* - * close can always block - */ - kfree(bd); - blk_put_queue(q); - return 0; -} - -static struct bsg_device *bsg_add_device(struct inode *inode, - struct request_queue *rq, - struct file *file) -{ - struct bsg_device *bd; - unsigned char buf[32]; - - lockdep_assert_held(&bsg_mutex); - - if (!blk_get_queue(rq)) - return ERR_PTR(-ENXIO); - - bd = bsg_alloc_device(); - if (!bd) { - blk_put_queue(rq); - return ERR_PTR(-ENOMEM); - } - - bd->queue = rq; - - refcount_set(&bd->ref_count, 1); - hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); - - strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); - bsg_dbg(bd, "bound to <%s>, max queue %d\n", - format_dev_t(buf, inode->i_rdev), bd->max_queue); - - return bd; -} - 
-static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) -{ - struct bsg_device *bd; - - lockdep_assert_held(&bsg_mutex); - - hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { - if (bd->queue == q) { - refcount_inc(&bd->ref_count); - goto found; - } - } - bd = NULL; -found: - return bd; -} - -static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) -{ - struct bsg_device *bd; - struct bsg_class_device *bcd; - - /* - * find the class device - */ - mutex_lock(&bsg_mutex); - bcd = idr_find(&bsg_minor_idr, iminor(inode)); - - if (!bcd) { - bd = ERR_PTR(-ENODEV); - goto out_unlock; - } - - bd = __bsg_get_device(iminor(inode), bcd->queue); - if (!bd) - bd = bsg_add_device(inode, bcd->queue, file); - -out_unlock: - mutex_unlock(&bsg_mutex); - return bd; -} - static int bsg_open(struct inode *inode, struct file *file) { - struct bsg_device *bd; - - bd = bsg_get_device(inode, file); - - if (IS_ERR(bd)) - return PTR_ERR(bd); - - file->private_data = bd; + if (!blk_get_queue(to_bsg_device(inode)->queue)) + return -ENXIO; return 0; } static int bsg_release(struct inode *inode, struct file *file) { - struct bsg_device *bd = file->private_data; - - file->private_data = NULL; - return bsg_put_device(bd); + blk_put_queue(to_bsg_device(inode)->queue); + return 0; } static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg) { - return put_user(bd->max_queue, uarg); + return put_user(READ_ONCE(bd->max_queue), uarg); } static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg) { - int queue; + int max_queue; - if (get_user(queue, uarg)) + if (get_user(max_queue, uarg)) return -EFAULT; - if (queue < 1) + if (max_queue < 1) return -EINVAL; - - spin_lock_irq(&bd->lock); - bd->max_queue = queue; - spin_unlock_irq(&bd->lock); + WRITE_ONCE(bd->max_queue, max_queue); return 0; } static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct bsg_device *bd = file->private_data; + struct bsg_device *bd = to_bsg_device(file_inode(file)); + struct request_queue *q = bd->queue; void __user *uarg = (void __user *) arg; + int __user *intp = uarg; + int val; switch (cmd) { /* @@ -366,17 +120,37 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * SCSI/sg ioctls */ case SG_GET_VERSION_NUM: + return put_user(30527, intp); case SCSI_IOCTL_GET_IDLUN: + return put_user(0, intp); case SCSI_IOCTL_GET_BUS_NUMBER: + return put_user(0, intp); case SG_SET_TIMEOUT: + if (get_user(val, intp)) + return -EFAULT; + bd->timeout = clock_t_to_jiffies(val); + return 0; case SG_GET_TIMEOUT: + return jiffies_to_clock_t(bd->timeout); case SG_GET_RESERVED_SIZE: + return put_user(min(bd->reserved_size, queue_max_bytes(q)), + intp); case SG_SET_RESERVED_SIZE: + if (get_user(val, intp)) + return -EFAULT; + if (val < 0) + return -EINVAL; + bd->reserved_size = + min_t(unsigned int, val, queue_max_bytes(q)); + return 0; case SG_EMULATED_HOST: - case SCSI_IOCTL_SEND_COMMAND: - return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); + return put_user(1, intp); case SG_IO: - return bsg_sg_io(bd->queue, file->f_mode, uarg); + return bsg_sg_io(bd, file->f_mode, uarg); + case SCSI_IOCTL_SEND_COMMAND: + pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n", + current->comm); + return -EINVAL; default: return -ENOTTY; } @@ -391,92 +165,65 @@ static const struct file_operations bsg_fops = { .llseek = default_llseek, }; -void bsg_unregister_queue(struct request_queue *q) +void 
bsg_unregister_queue(struct bsg_device *bd) { - struct bsg_class_device *bcd = &q->bsg_dev; - - if (!bcd->class_dev) - return; - - mutex_lock(&bsg_mutex); - idr_remove(&bsg_minor_idr, bcd->minor); - if (q->kobj.sd) - sysfs_remove_link(&q->kobj, "bsg"); - device_unregister(bcd->class_dev); - bcd->class_dev = NULL; - mutex_unlock(&bsg_mutex); + if (bd->queue->kobj.sd) + sysfs_remove_link(&bd->queue->kobj, "bsg"); + cdev_device_del(&bd->cdev, &bd->device); + ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); + kfree(bd); } EXPORT_SYMBOL_GPL(bsg_unregister_queue); -int bsg_register_queue(struct request_queue *q, struct device *parent, - const char *name, const struct bsg_ops *ops) +struct bsg_device *bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn) { - struct bsg_class_device *bcd; - dev_t dev; + struct bsg_device *bd; int ret; - struct device *class_dev = NULL; - /* - * we need a proper transport to send commands, not a stacked device - */ - if (!queue_is_mq(q)) - return 0; - - bcd = &q->bsg_dev; - memset(bcd, 0, sizeof(*bcd)); - - mutex_lock(&bsg_mutex); + bd = kzalloc(sizeof(*bd), GFP_KERNEL); + if (!bd) + return ERR_PTR(-ENOMEM); + bd->max_queue = BSG_DEFAULT_CMDS; + bd->reserved_size = INT_MAX; + bd->queue = q; + bd->sg_io_fn = sg_io_fn; - ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL); + ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL); if (ret < 0) { - if (ret == -ENOSPC) { - printk(KERN_ERR "bsg: too many bsg devices\n"); - ret = -EINVAL; - } - goto unlock; - } - - bcd->minor = ret; - bcd->queue = q; - bcd->ops = ops; - dev = MKDEV(bsg_major, bcd->minor); - class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name); - if (IS_ERR(class_dev)) { - ret = PTR_ERR(class_dev); - goto idr_remove; + if (ret == -ENOSPC) + dev_err(parent, "bsg: too many bsg devices\n"); + goto out_kfree; } - bcd->class_dev = class_dev; + bd->device.devt = MKDEV(bsg_major, ret); + bd->device.class = bsg_class; + bd->device.parent = parent; + dev_set_name(&bd->device, "%s", name); + device_initialize(&bd->device); + + cdev_init(&bd->cdev, &bsg_fops); + bd->cdev.owner = THIS_MODULE; + ret = cdev_device_add(&bd->cdev, &bd->device); + if (ret) + goto out_ida_remove; if (q->kobj.sd) { - ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"); + ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg"); if (ret) - goto unregister_class_dev; + goto out_device_del; } - mutex_unlock(&bsg_mutex); - return 0; - -unregister_class_dev: - device_unregister(class_dev); -idr_remove: - idr_remove(&bsg_minor_idr, bcd->minor); -unlock: - mutex_unlock(&bsg_mutex); - return ret; -} - -int bsg_scsi_register_queue(struct request_queue *q, struct device *parent) -{ - if (!blk_queue_scsi_passthrough(q)) { - WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); - return -EINVAL; - } + return bd; - return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops); +out_device_del: + cdev_device_del(&bd->cdev, &bd->device); +out_ida_remove: + ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); +out_kfree: + kfree(bd); + return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(bsg_scsi_register_queue); - -static struct cdev bsg_cdev; +EXPORT_SYMBOL_GPL(bsg_register_queue); static char *bsg_devnode(struct device *dev, umode_t *mode) { @@ -485,11 +232,8 @@ static char *bsg_devnode(struct device *dev, umode_t *mode) static int __init bsg_init(void) { - int ret, i; dev_t devid; - - for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++) 
- INIT_HLIST_HEAD(&bsg_device_list[i]); + int ret; bsg_class = class_create(THIS_MODULE, "bsg"); if (IS_ERR(bsg_class)) @@ -499,19 +243,12 @@ static int __init bsg_init(void) ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); if (ret) goto destroy_bsg_class; - bsg_major = MAJOR(devid); - cdev_init(&bsg_cdev, &bsg_fops); - ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS); - if (ret) - goto unregister_chrdev; - printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION " loaded (major %d)\n", bsg_major); return 0; -unregister_chrdev: - unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS); + destroy_bsg_class: class_destroy(bsg_class); return ret; diff --git a/block/fops.c b/block/fops.c new file mode 100644 index 000000000000..ffce6f6c68dd --- /dev/null +++ b/block/fops.c @@ -0,0 +1,640 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE + * Copyright (C) 2016 - 2020 Christoph Hellwig + */ +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/blkdev.h> +#include <linux/buffer_head.h> +#include <linux/mpage.h> +#include <linux/uio.h> +#include <linux/namei.h> +#include <linux/task_io_accounting_ops.h> +#include <linux/falloc.h> +#include <linux/suspend.h> +#include "blk.h" + +static struct inode *bdev_file_inode(struct file *file) +{ + return file->f_mapping->host; +} + +static int blkdev_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int create) +{ + bh->b_bdev = I_BDEV(inode); + bh->b_blocknr = iblock; + set_buffer_mapped(bh); + return 0; +} + +static unsigned int dio_bio_write_op(struct kiocb *iocb) +{ + unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; + + /* avoid the need for a I/O completion work item */ + if (iocb->ki_flags & IOCB_DSYNC) + op |= REQ_FUA; + return op; +} + +#define DIO_INLINE_BIO_VECS 4 + +static void blkdev_bio_end_io_simple(struct bio *bio) +{ + struct task_struct *waiter = bio->bi_private; + + WRITE_ONCE(bio->bi_private, NULL); + blk_wake_io_task(waiter); +} + +static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, + struct iov_iter *iter, unsigned int nr_pages) +{ + struct file *file = iocb->ki_filp; + struct block_device *bdev = I_BDEV(bdev_file_inode(file)); + struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs; + loff_t pos = iocb->ki_pos; + bool should_dirty = false; + struct bio bio; + ssize_t ret; + blk_qc_t qc; + + if ((pos | iov_iter_alignment(iter)) & + (bdev_logical_block_size(bdev) - 1)) + return -EINVAL; + + if (nr_pages <= DIO_INLINE_BIO_VECS) + vecs = inline_vecs; + else { + vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec), + GFP_KERNEL); + if (!vecs) + return -ENOMEM; + } + + bio_init(&bio, vecs, nr_pages); + bio_set_dev(&bio, bdev); + bio.bi_iter.bi_sector = pos >> 9; + bio.bi_write_hint = iocb->ki_hint; + bio.bi_private = current; + bio.bi_end_io = blkdev_bio_end_io_simple; + bio.bi_ioprio = iocb->ki_ioprio; + + ret = bio_iov_iter_get_pages(&bio, iter); + if (unlikely(ret)) + goto out; + ret = bio.bi_iter.bi_size; + + if (iov_iter_rw(iter) == READ) { + bio.bi_opf = REQ_OP_READ; + if (iter_is_iovec(iter)) + should_dirty = true; + } else { + bio.bi_opf = dio_bio_write_op(iocb); + task_io_account_write(ret); + } + if (iocb->ki_flags & IOCB_NOWAIT) + bio.bi_opf |= REQ_NOWAIT; + if (iocb->ki_flags & IOCB_HIPRI) + bio_set_polled(&bio, iocb); + + qc = submit_bio(&bio); + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!READ_ONCE(bio.bi_private)) + break; + if 
(!(iocb->ki_flags & IOCB_HIPRI) || + !blk_poll(bdev_get_queue(bdev), qc, true)) + blk_io_schedule(); + } + __set_current_state(TASK_RUNNING); + + bio_release_pages(&bio, should_dirty); + if (unlikely(bio.bi_status)) + ret = blk_status_to_errno(bio.bi_status); + +out: + if (vecs != inline_vecs) + kfree(vecs); + + bio_uninit(&bio); + + return ret; +} + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + bool multi_bio : 1; + bool should_dirty : 1; + bool is_sync : 1; + struct bio bio; +}; + +static struct bio_set blkdev_dio_pool; + +static int blkdev_iopoll(struct kiocb *kiocb, bool wait) +{ + struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); + struct request_queue *q = bdev_get_queue(bdev); + + return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait); +} + +static void blkdev_bio_end_io(struct bio *bio) +{ + struct blkdev_dio *dio = bio->bi_private; + bool should_dirty = dio->should_dirty; + + if (bio->bi_status && !dio->bio.bi_status) + dio->bio.bi_status = bio->bi_status; + + if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) { + if (!dio->is_sync) { + struct kiocb *iocb = dio->iocb; + ssize_t ret; + + if (likely(!dio->bio.bi_status)) { + ret = dio->size; + iocb->ki_pos += ret; + } else { + ret = blk_status_to_errno(dio->bio.bi_status); + } + + dio->iocb->ki_complete(iocb, ret, 0); + if (dio->multi_bio) + bio_put(&dio->bio); + } else { + struct task_struct *waiter = dio->waiter; + + WRITE_ONCE(dio->waiter, NULL); + blk_wake_io_task(waiter); + } + } + + if (should_dirty) { + bio_check_pages_dirty(bio); + } else { + bio_release_pages(bio, false); + bio_put(bio); + } +} + +static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, + unsigned int nr_pages) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = bdev_file_inode(file); + struct block_device *bdev = I_BDEV(inode); + struct blk_plug plug; + struct blkdev_dio *dio; + struct bio *bio; + bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; + bool is_read = (iov_iter_rw(iter) == READ), is_sync; + loff_t pos = iocb->ki_pos; + blk_qc_t qc = BLK_QC_T_NONE; + int ret = 0; + + if ((pos | iov_iter_alignment(iter)) & + (bdev_logical_block_size(bdev) - 1)) + return -EINVAL; + + bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool); + + dio = container_of(bio, struct blkdev_dio, bio); + dio->is_sync = is_sync = is_sync_kiocb(iocb); + if (dio->is_sync) { + dio->waiter = current; + bio_get(bio); + } else { + dio->iocb = iocb; + } + + dio->size = 0; + dio->multi_bio = false; + dio->should_dirty = is_read && iter_is_iovec(iter); + + /* + * Don't plug for HIPRI/polled IO, as those should go straight + * to issue + */ + if (!is_poll) + blk_start_plug(&plug); + + for (;;) { + bio_set_dev(bio, bdev); + bio->bi_iter.bi_sector = pos >> 9; + bio->bi_write_hint = iocb->ki_hint; + bio->bi_private = dio; + bio->bi_end_io = blkdev_bio_end_io; + bio->bi_ioprio = iocb->ki_ioprio; + + ret = bio_iov_iter_get_pages(bio, iter); + if (unlikely(ret)) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + break; + } + + if (is_read) { + bio->bi_opf = REQ_OP_READ; + if (dio->should_dirty) + bio_set_pages_dirty(bio); + } else { + bio->bi_opf = dio_bio_write_op(iocb); + task_io_account_write(bio->bi_iter.bi_size); + } + if (iocb->ki_flags & IOCB_NOWAIT) + bio->bi_opf |= REQ_NOWAIT; + + dio->size += bio->bi_iter.bi_size; + pos += bio->bi_iter.bi_size; + + nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS); + if (!nr_pages) { + bool polled = false; + + if 
(iocb->ki_flags & IOCB_HIPRI) {
+ bio_set_polled(bio, iocb);
+ polled = true;
+ }
+
+ qc = submit_bio(bio);
+
+ if (polled)
+ WRITE_ONCE(iocb->ki_cookie, qc);
+ break;
+ }
+
+ if (!dio->multi_bio) {
+ /*
+ * AIO needs an extra reference to ensure the dio
+ * structure which is embedded into the first bio
+ * stays around.
+ */
+ if (!is_sync)
+ bio_get(bio);
+ dio->multi_bio = true;
+ atomic_set(&dio->ref, 2);
+ } else {
+ atomic_inc(&dio->ref);
+ }
+
+ submit_bio(bio);
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
+ }
+
+ if (!is_poll)
+ blk_finish_plug(&plug);
+
+ if (!is_sync)
+ return -EIOCBQUEUED;
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(dio->waiter))
+ break;
+
+ if (!(iocb->ki_flags & IOCB_HIPRI) ||
+ !blk_poll(bdev_get_queue(bdev), qc, true))
+ blk_io_schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ if (!ret)
+ ret = blk_status_to_errno(dio->bio.bi_status);
+ if (likely(!ret))
+ ret = dio->size;
+
+ bio_put(&dio->bio);
+ return ret;
+}
+
+static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+ unsigned int nr_pages;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
+ if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
+ return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
+
+ return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
+}
+
+static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
+{
+ return block_write_full_page(page, blkdev_get_block, wbc);
+}
+
+static int blkdev_readpage(struct file *file, struct page *page)
+{
+ return block_read_full_page(page, blkdev_get_block);
+}
+
+static void blkdev_readahead(struct readahead_control *rac)
+{
+ mpage_readahead(rac, blkdev_get_block);
+}
+
+static int blkdev_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags, struct page **pagep,
+ void **fsdata)
+{
+ return block_write_begin(mapping, pos, len, flags, pagep,
+ blkdev_get_block);
+}
+
+static int blkdev_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied, struct page *page,
+ void *fsdata)
+{
+ int ret;
+ ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+ unlock_page(page);
+ put_page(page);
+
+ return ret;
+}
+
+static int blkdev_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return generic_writepages(mapping, wbc);
+}
+
+const struct address_space_operations def_blk_aops = {
+ .set_page_dirty = __set_page_dirty_buffers,
+ .readpage = blkdev_readpage,
+ .readahead = blkdev_readahead,
+ .writepage = blkdev_writepage,
+ .write_begin = blkdev_write_begin,
+ .write_end = blkdev_write_end,
+ .writepages = blkdev_writepages,
+ .direct_IO = blkdev_direct_IO,
+ .migratepage = buffer_migrate_page_norefs,
+ .is_dirty_writeback = buffer_check_dirty_writeback,
+};
+
+/*
+ * for a block special file file_inode(file)->i_size is zero
+ * so we compute the size by hand (just as in block_read/write above)
+ */
+static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct inode *bd_inode = bdev_file_inode(file);
+ loff_t retval;
+
+ inode_lock(bd_inode);
+ retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
+ inode_unlock(bd_inode);
+ return retval;
+}
+
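Both direct I/O paths above bail out with -EINVAL unless the file offset and all user buffers are aligned to the device's logical block size — the ((pos | iov_iter_alignment(iter)) & (bdev_logical_block_size(bdev) - 1)) check. A minimal userspace sketch of a read that satisfies this check; the device node and the 4096-byte alignment are illustrative assumptions, not part of the patch:

/* Sketch: logical-block-aligned O_DIRECT read of a block device. */
#define _GNU_SOURCE		/* O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/dev/sdX", O_RDONLY | O_DIRECT);	/* hypothetical node */

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	/* 4096 is assumed to be a multiple of the logical block size
	 * (BLKSSZGET reports the real value); misalignment gets -EINVAL. */
	if (pread(fd, buf, 4096, 0) < 0)
		perror("pread");
	free(buf);
	close(fd);
	return 0;
}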
+static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
+ int datasync)
+{
+ struct inode *bd_inode = bdev_file_inode(filp);
+ struct block_device *bdev = I_BDEV(bd_inode);
+ int error;
+
+ error = file_write_and_wait_range(filp, start, end);
+ if (error)
+ return error;
+
+ /*
+ * There is no need to serialise calls to blkdev_issue_flush with
+ * i_mutex and doing so causes performance issues with concurrent
+ * O_SYNC writers to a block device.
+ */
+ error = blkdev_issue_flush(bdev);
+ if (error == -EOPNOTSUPP)
+ error = 0;
+
+ return error;
+}
+
+static int blkdev_open(struct inode *inode, struct file *filp)
+{
+ struct block_device *bdev;
+
+ /*
+ * Preserve backwards compatibility and allow large file access
+ * even if userspace doesn't ask for it explicitly. Some mkfs
+ * binary needs it. We might want to drop this workaround
+ * during an unstable branch.
+ */
+ filp->f_flags |= O_LARGEFILE;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+
+ if (filp->f_flags & O_NDELAY)
+ filp->f_mode |= FMODE_NDELAY;
+ if (filp->f_flags & O_EXCL)
+ filp->f_mode |= FMODE_EXCL;
+ if ((filp->f_flags & O_ACCMODE) == 3)
+ filp->f_mode |= FMODE_WRITE_IOCTL;
+
+ bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ filp->f_mapping = bdev->bd_inode->i_mapping;
+ filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
+ return 0;
+}
+
+static int blkdev_close(struct inode *inode, struct file *filp)
+{
+ struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
+
+ blkdev_put(bdev, filp->f_mode);
+ return 0;
+}
+
+static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+ fmode_t mode = file->f_mode;
+
+ /*
+ * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+ * to update it before every ioctl.
+ */
+ if (file->f_flags & O_NDELAY)
+ mode |= FMODE_NDELAY;
+ else
+ mode &= ~FMODE_NDELAY;
+
+ return blkdev_ioctl(bdev, mode, cmd, arg);
+}
+
+/*
+ * Write data to the block device. Only intended for the block device itself
+ * and the raw driver which basically is a fake block device.
+ *
+ * Does not take i_mutex for the write and thus is not for general purpose
+ * use.
+ */ +static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct inode *bd_inode = bdev_file_inode(file); + loff_t size = i_size_read(bd_inode); + struct blk_plug plug; + size_t shorted = 0; + ssize_t ret; + + if (bdev_read_only(I_BDEV(bd_inode))) + return -EPERM; + + if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev)) + return -ETXTBSY; + + if (!iov_iter_count(from)) + return 0; + + if (iocb->ki_pos >= size) + return -ENOSPC; + + if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT) + return -EOPNOTSUPP; + + size -= iocb->ki_pos; + if (iov_iter_count(from) > size) { + shorted = iov_iter_count(from) - size; + iov_iter_truncate(from, size); + } + + blk_start_plug(&plug); + ret = __generic_file_write_iter(iocb, from); + if (ret > 0) + ret = generic_write_sync(iocb, ret); + iov_iter_reexpand(from, iov_iter_count(from) + shorted); + blk_finish_plug(&plug); + return ret; +} + +static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + struct file *file = iocb->ki_filp; + struct inode *bd_inode = bdev_file_inode(file); + loff_t size = i_size_read(bd_inode); + loff_t pos = iocb->ki_pos; + size_t shorted = 0; + ssize_t ret; + + if (pos >= size) + return 0; + + size -= pos; + if (iov_iter_count(to) > size) { + shorted = iov_iter_count(to) - size; + iov_iter_truncate(to, size); + } + + ret = generic_file_read_iter(iocb, to); + iov_iter_reexpand(to, iov_iter_count(to) + shorted); + return ret; +} + +#define BLKDEV_FALLOC_FL_SUPPORTED \ + (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \ + FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE) + +static long blkdev_fallocate(struct file *file, int mode, loff_t start, + loff_t len) +{ + struct block_device *bdev = I_BDEV(bdev_file_inode(file)); + loff_t end = start + len - 1; + loff_t isize; + int error; + + /* Fail if we don't recognize the flags. */ + if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED) + return -EOPNOTSUPP; + + /* Don't go off the end of the device. */ + isize = i_size_read(bdev->bd_inode); + if (start >= isize) + return -EINVAL; + if (end >= isize) { + if (mode & FALLOC_FL_KEEP_SIZE) { + len = isize - start; + end = start + len - 1; + } else + return -EINVAL; + } + + /* + * Don't allow IO that isn't aligned to logical block size. + */ + if ((start | len) & (bdev_logical_block_size(bdev) - 1)) + return -EINVAL; + + /* Invalidate the page cache, including dirty pages. */ + error = truncate_bdev_range(bdev, file->f_mode, start, end); + if (error) + return error; + + switch (mode) { + case FALLOC_FL_ZERO_RANGE: + case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE: + error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, + GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); + break; + case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE: + error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, + GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK); + break; + case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE: + error = blkdev_issue_discard(bdev, start >> 9, len >> 9, + GFP_KERNEL, 0); + break; + default: + return -EOPNOTSUPP; + } + if (error) + return error; + + /* + * Invalidate the page cache again; if someone wandered in and dirtied + * a page, we just discard it - userspace has no way of knowing whether + * the write happened before or after discard completing... 
+ */ + return truncate_bdev_range(bdev, file->f_mode, start, end); +} + +const struct file_operations def_blk_fops = { + .open = blkdev_open, + .release = blkdev_close, + .llseek = blkdev_llseek, + .read_iter = blkdev_read_iter, + .write_iter = blkdev_write_iter, + .iopoll = blkdev_iopoll, + .mmap = generic_file_mmap, + .fsync = blkdev_fsync, + .unlocked_ioctl = block_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_blkdev_ioctl, +#endif + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, + .fallocate = blkdev_fallocate, +}; + +static __init int blkdev_init(void) +{ + return bioset_init(&blkdev_dio_pool, 4, + offsetof(struct blkdev_dio, bio), + BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE); +} +module_init(blkdev_init); diff --git a/block/genhd.c b/block/genhd.c index 567549a011d1..7b6e5e1cf956 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -183,6 +183,7 @@ static struct blk_major_name { void (*probe)(dev_t devt); } *major_names[BLKDEV_MAJOR_HASH_SIZE]; static DEFINE_MUTEX(major_names_lock); +static DEFINE_SPINLOCK(major_names_spinlock); /* index in the above - for now: assume no multimajor ranges */ static inline int major_to_index(unsigned major) @@ -195,11 +196,11 @@ void blkdev_show(struct seq_file *seqf, off_t offset) { struct blk_major_name *dp; - mutex_lock(&major_names_lock); + spin_lock(&major_names_spinlock); for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next) if (dp->major == offset) seq_printf(seqf, "%3d %s\n", dp->major, dp->name); - mutex_unlock(&major_names_lock); + spin_unlock(&major_names_spinlock); } #endif /* CONFIG_PROC_FS */ @@ -271,6 +272,7 @@ int __register_blkdev(unsigned int major, const char *name, p->next = NULL; index = major_to_index(major); + spin_lock(&major_names_spinlock); for (n = &major_names[index]; *n; n = &(*n)->next) { if ((*n)->major == major) break; @@ -279,6 +281,7 @@ int __register_blkdev(unsigned int major, const char *name, *n = p; else ret = -EBUSY; + spin_unlock(&major_names_spinlock); if (ret < 0) { printk("register_blkdev: cannot get major %u for %s\n", @@ -298,6 +301,7 @@ void unregister_blkdev(unsigned int major, const char *name) int index = major_to_index(major); mutex_lock(&major_names_lock); + spin_lock(&major_names_spinlock); for (n = &major_names[index]; *n; n = &(*n)->next) if ((*n)->major == major) break; @@ -307,6 +311,7 @@ void unregister_blkdev(unsigned int major, const char *name) p = *n; *n = p->next; } + spin_unlock(&major_names_spinlock); mutex_unlock(&major_names_lock); kfree(p); } diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 3c3693c34f06..7f3c3932b723 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -270,12 +270,6 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, deadline_remove_request(rq->q, per_prio, rq); } -/* Number of requests queued for a given priority level. */ -static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) -{ - return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); -} - /* * deadline_check_fifo returns 0 if there are no expired requests on the fifo, * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) @@ -953,6 +947,12 @@ static int dd_async_depth_show(void *data, struct seq_file *m) return 0; } +/* Number of requests queued for a given priority level. 
*/ +static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) +{ + return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); +} + static int dd_queued_show(void *data, struct seq_file *m) { struct request_queue *q = data; diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c deleted file mode 100644 index d247431a6853..000000000000 --- a/block/scsi_ioctl.c +++ /dev/null @@ -1,890 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2001 Jens Axboe <axboe@suse.de> - */ -#include <linux/compat.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/module.h> -#include <linux/blkdev.h> -#include <linux/capability.h> -#include <linux/completion.h> -#include <linux/cdrom.h> -#include <linux/ratelimit.h> -#include <linux/slab.h> -#include <linux/times.h> -#include <linux/uio.h> -#include <linux/uaccess.h> - -#include <scsi/scsi.h> -#include <scsi/scsi_ioctl.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/sg.h> - -struct blk_cmd_filter { - unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; - unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; -}; - -static struct blk_cmd_filter blk_default_cmd_filter; - -/* Command group 3 is reserved and should never be used. */ -const unsigned char scsi_command_size_tbl[8] = -{ - 6, 10, 10, 12, - 16, 12, 10, 10 -}; -EXPORT_SYMBOL(scsi_command_size_tbl); - -static int sg_get_version(int __user *p) -{ - static const int sg_version_num = 30527; - return put_user(sg_version_num, p); -} - -static int scsi_get_idlun(struct request_queue *q, int __user *p) -{ - return put_user(0, p); -} - -static int scsi_get_bus(struct request_queue *q, int __user *p) -{ - return put_user(0, p); -} - -static int sg_get_timeout(struct request_queue *q) -{ - return jiffies_to_clock_t(q->sg_timeout); -} - -static int sg_set_timeout(struct request_queue *q, int __user *p) -{ - int timeout, err = get_user(timeout, p); - - if (!err) - q->sg_timeout = clock_t_to_jiffies(timeout); - - return err; -} - -static int max_sectors_bytes(struct request_queue *q) -{ - unsigned int max_sectors = queue_max_sectors(q); - - max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); - - return max_sectors << 9; -} - -static int sg_get_reserved_size(struct request_queue *q, int __user *p) -{ - int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q)); - - return put_user(val, p); -} - -static int sg_set_reserved_size(struct request_queue *q, int __user *p) -{ - int size, err = get_user(size, p); - - if (err) - return err; - - if (size < 0) - return -EINVAL; - - q->sg_reserved_size = min(size, max_sectors_bytes(q)); - return 0; -} - -/* - * will always return that we are ATAPI even for a real SCSI drive, I'm not - * so sure this is worth doing anything about (why would you care??) 
- */ -static int sg_emulated_host(struct request_queue *q, int __user *p) -{ - return put_user(1, p); -} - -static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) -{ - /* Basic read-only commands */ - __set_bit(TEST_UNIT_READY, filter->read_ok); - __set_bit(REQUEST_SENSE, filter->read_ok); - __set_bit(READ_6, filter->read_ok); - __set_bit(READ_10, filter->read_ok); - __set_bit(READ_12, filter->read_ok); - __set_bit(READ_16, filter->read_ok); - __set_bit(READ_BUFFER, filter->read_ok); - __set_bit(READ_DEFECT_DATA, filter->read_ok); - __set_bit(READ_CAPACITY, filter->read_ok); - __set_bit(READ_LONG, filter->read_ok); - __set_bit(INQUIRY, filter->read_ok); - __set_bit(MODE_SENSE, filter->read_ok); - __set_bit(MODE_SENSE_10, filter->read_ok); - __set_bit(LOG_SENSE, filter->read_ok); - __set_bit(START_STOP, filter->read_ok); - __set_bit(GPCMD_VERIFY_10, filter->read_ok); - __set_bit(VERIFY_16, filter->read_ok); - __set_bit(REPORT_LUNS, filter->read_ok); - __set_bit(SERVICE_ACTION_IN_16, filter->read_ok); - __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok); - __set_bit(MAINTENANCE_IN, filter->read_ok); - __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok); - - /* Audio CD commands */ - __set_bit(GPCMD_PLAY_CD, filter->read_ok); - __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok); - __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok); - __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok); - __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok); - - /* CD/DVD data reading */ - __set_bit(GPCMD_READ_CD, filter->read_ok); - __set_bit(GPCMD_READ_CD_MSF, filter->read_ok); - __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok); - __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok); - __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok); - __set_bit(GPCMD_READ_HEADER, filter->read_ok); - __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok); - __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok); - __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok); - __set_bit(GPCMD_REPORT_KEY, filter->read_ok); - __set_bit(GPCMD_SCAN, filter->read_ok); - __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok); - __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok); - __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok); - __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok); - __set_bit(GPCMD_SEEK, filter->read_ok); - __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok); - - /* Basic writing commands */ - __set_bit(WRITE_6, filter->write_ok); - __set_bit(WRITE_10, filter->write_ok); - __set_bit(WRITE_VERIFY, filter->write_ok); - __set_bit(WRITE_12, filter->write_ok); - __set_bit(WRITE_VERIFY_12, filter->write_ok); - __set_bit(WRITE_16, filter->write_ok); - __set_bit(WRITE_LONG, filter->write_ok); - __set_bit(WRITE_LONG_2, filter->write_ok); - __set_bit(WRITE_SAME, filter->write_ok); - __set_bit(WRITE_SAME_16, filter->write_ok); - __set_bit(WRITE_SAME_32, filter->write_ok); - __set_bit(ERASE, filter->write_ok); - __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); - __set_bit(MODE_SELECT, filter->write_ok); - __set_bit(LOG_SELECT, filter->write_ok); - __set_bit(GPCMD_BLANK, filter->write_ok); - __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok); - __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok); - __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok); - __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok); - __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok); - __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok); - __set_bit(GPCMD_SEND_EVENT, filter->write_ok); - __set_bit(GPCMD_SEND_KEY, filter->write_ok); - 
__set_bit(GPCMD_SEND_OPC, filter->write_ok); - __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok); - __set_bit(GPCMD_SET_SPEED, filter->write_ok); - __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok); - __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok); - __set_bit(GPCMD_SET_STREAMING, filter->write_ok); - __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok); - - /* ZBC Commands */ - __set_bit(ZBC_OUT, filter->write_ok); - __set_bit(ZBC_IN, filter->read_ok); -} - -int blk_verify_command(unsigned char *cmd, fmode_t mode) -{ - struct blk_cmd_filter *filter = &blk_default_cmd_filter; - - /* root can do any command. */ - if (capable(CAP_SYS_RAWIO)) - return 0; - - /* Anybody who can open the device can do a read-safe command */ - if (test_bit(cmd[0], filter->read_ok)) - return 0; - - /* Write-safe commands require a writable open */ - if (test_bit(cmd[0], filter->write_ok) && (mode & FMODE_WRITE)) - return 0; - - return -EPERM; -} -EXPORT_SYMBOL(blk_verify_command); - -static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, - struct sg_io_hdr *hdr, fmode_t mode) -{ - struct scsi_request *req = scsi_req(rq); - - if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len)) - return -EFAULT; - if (blk_verify_command(req->cmd, mode)) - return -EPERM; - - /* - * fill in request structure - */ - req->cmd_len = hdr->cmd_len; - - rq->timeout = msecs_to_jiffies(hdr->timeout); - if (!rq->timeout) - rq->timeout = q->sg_timeout; - if (!rq->timeout) - rq->timeout = BLK_DEFAULT_SG_TIMEOUT; - if (rq->timeout < BLK_MIN_SG_TIMEOUT) - rq->timeout = BLK_MIN_SG_TIMEOUT; - - return 0; -} - -static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, - struct bio *bio) -{ - struct scsi_request *req = scsi_req(rq); - int r, ret = 0; - - /* - * fill in all the output members - */ - hdr->status = req->result & 0xff; - hdr->masked_status = status_byte(req->result); - hdr->msg_status = COMMAND_COMPLETE; - hdr->host_status = host_byte(req->result); - hdr->driver_status = 0; - if (scsi_status_is_check_condition(hdr->status)) - hdr->driver_status = DRIVER_SENSE; - hdr->info = 0; - if (hdr->masked_status || hdr->host_status || hdr->driver_status) - hdr->info |= SG_INFO_CHECK; - hdr->resid = req->resid_len; - hdr->sb_len_wr = 0; - - if (req->sense_len && hdr->sbp) { - int len = min((unsigned int) hdr->mx_sb_len, req->sense_len); - - if (!copy_to_user(hdr->sbp, req->sense, len)) - hdr->sb_len_wr = len; - else - ret = -EFAULT; - } - - r = blk_rq_unmap_user(bio); - if (!ret) - ret = r; - - return ret; -} - -static int sg_io(struct request_queue *q, struct gendisk *bd_disk, - struct sg_io_hdr *hdr, fmode_t mode) -{ - unsigned long start_time; - ssize_t ret = 0; - int writing = 0; - int at_head = 0; - struct request *rq; - struct scsi_request *req; - struct bio *bio; - - if (hdr->interface_id != 'S') - return -EINVAL; - - if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) - return -EIO; - - if (hdr->dxfer_len) - switch (hdr->dxfer_direction) { - default: - return -EINVAL; - case SG_DXFER_TO_DEV: - writing = 1; - break; - case SG_DXFER_TO_FROM_DEV: - case SG_DXFER_FROM_DEV: - break; - } - if (hdr->flags & SG_FLAG_Q_AT_HEAD) - at_head = 1; - - ret = -ENOMEM; - rq = blk_get_request(q, writing ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- req = scsi_req(rq);
-
- if (hdr->cmd_len > BLK_MAX_CDB) {
- req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
- if (!req->cmd)
- goto out_put_request;
- }
-
- ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
- if (ret < 0)
- goto out_free_cdb;
-
- ret = 0;
- if (hdr->iovec_count) {
- struct iov_iter i;
- struct iovec *iov = NULL;
-
- ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
- hdr->iovec_count, 0, &iov, &i);
- if (ret < 0)
- goto out_free_cdb;
-
- /* SG_IO howto says that the shorter of the two wins */
- iov_iter_truncate(&i, hdr->dxfer_len);
-
- ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
- kfree(iov);
- } else if (hdr->dxfer_len)
- ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
- GFP_KERNEL);
-
- if (ret)
- goto out_free_cdb;
-
- bio = rq->bio;
- req->retries = 0;
-
- start_time = jiffies;
-
- blk_execute_rq(bd_disk, rq, at_head);
-
- hdr->duration = jiffies_to_msecs(jiffies - start_time);
-
- ret = blk_complete_sghdr_rq(rq, hdr, bio);
-
-out_free_cdb:
- scsi_req_free_cmd(req);
-out_put_request:
- blk_put_request(rq);
- return ret;
-}
-
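sg_io() above is the kernel half of the SG_IO ioctl. For orientation, a hedged sketch of the matching userspace half — a 6-byte INQUIRY, with the device node, buffer sizes, and timeout as illustrative assumptions:

/* Sketch: SG_IO INQUIRY as serviced by sg_io() above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char resp[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* sg_io() rejects anything else */
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = resp;
	hdr.dxfer_len = sizeof(resp);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else if (!(hdr.info & SG_INFO_CHECK))
		printf("vendor: %.8s\n", (char *)resp + 8);
	close(fd);
	return 0;
}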
-/**
- * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
- * @q: request queue to send scsi commands down
- * @disk: gendisk to operate on (optional)
- * @mode: mode used to open the file through which the ioctl has been
- * submitted
- * @sic: userspace structure describing the command to perform
- *
- * Send down the scsi command described by @sic to the device below
- * the request queue @q. The @mode is used to perform fine-grained
- * permission checks that allow users to send down non-destructive
- * SCSI commands. If the caller has a struct gendisk available it
- * should be passed in as @disk to allow the low level driver to use
- * the information contained in it. A NULL @disk is only allowed if
- * the caller knows that the low level driver doesn't need it (e.g. in
- * the scsi subsystem).
- *
- * Notes:
- * - This interface is deprecated - users should use the SG_IO
- * interface instead, as this is a more flexible approach to
- * performing SCSI commands on a device.
- * - The SCSI command length is determined by examining the 1st byte
- * of the given command. There is no way to override this.
- * - Data transfers are limited to PAGE_SIZE
- * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
- * accommodate the sense buffer when an error occurs.
- * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
- * old code will not be surprised.
- * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
- * a negative return and the Unix error code in 'errno'.
- * If the SCSI command succeeds then 0 is returned.
- * Positive numbers returned are the compacted SCSI error codes (4
- * bytes in one int) where the lowest byte is the SCSI status.
- */
-int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
- struct scsi_ioctl_command __user *sic)
-{
- enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
- struct request *rq;
- struct scsi_request *req;
- int err;
- unsigned int in_len, out_len, bytes, opcode, cmdlen;
- char *buffer = NULL;
-
- if (!sic)
- return -EINVAL;
-
- /*
- * get in and out lengths, verify they don't exceed a page worth of data
- */
- if (get_user(in_len, &sic->inlen))
- return -EFAULT;
- if (get_user(out_len, &sic->outlen))
- return -EFAULT;
- if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
- return -EINVAL;
- if (get_user(opcode, sic->data))
- return -EFAULT;
-
- bytes = max(in_len, out_len);
- if (bytes) {
- buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
- if (!buffer)
- return -ENOMEM;
-
- }
-
- rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto error_free_buffer;
- }
- req = scsi_req(rq);
-
- cmdlen = COMMAND_SIZE(opcode);
-
- /*
- * get command and data to send to device, if any
- */
- err = -EFAULT;
- req->cmd_len = cmdlen;
- if (copy_from_user(req->cmd, sic->data, cmdlen))
- goto error;
-
- if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
- goto error;
-
- err = blk_verify_command(req->cmd, mode);
- if (err)
- goto error;
-
- /* default; possibly overridden later */
- req->retries = 5;
-
- switch (opcode) {
- case SEND_DIAGNOSTIC:
- case FORMAT_UNIT:
- rq->timeout = FORMAT_UNIT_TIMEOUT;
- req->retries = 1;
- break;
- case START_STOP:
- rq->timeout = START_STOP_TIMEOUT;
- break;
- case MOVE_MEDIUM:
- rq->timeout = MOVE_MEDIUM_TIMEOUT;
- break;
- case READ_ELEMENT_STATUS:
- rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
- break;
- case READ_DEFECT_DATA:
- rq->timeout = READ_DEFECT_DATA_TIMEOUT;
- req->retries = 1;
- break;
- default:
- rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- break;
- }
-
- if (bytes) {
- err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
- if (err)
- goto error;
- }
-
- blk_execute_rq(disk, rq, 0);
-
- err = req->result & 0xff; /* only 8 bit SCSI status */
- if (err) {
- if (req->sense_len && req->sense) {
- bytes = (OMAX_SB_LEN > req->sense_len) ?
- req->sense_len : OMAX_SB_LEN;
- if (copy_to_user(sic->data, req->sense, bytes))
- err = -EFAULT;
- }
- } else {
- if (copy_to_user(sic->data, buffer, out_len))
- err = -EFAULT;
- }
-
-error:
- blk_put_request(rq);
-
-error_free_buffer:
- kfree(buffer);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
-
-/* Send basic block requests */
-static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
- int cmd, int data)
-{
- struct request *rq;
- int err;
-
- rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
- scsi_req(rq)->cmd[0] = cmd;
- scsi_req(rq)->cmd[4] = data;
- scsi_req(rq)->cmd_len = 6;
- blk_execute_rq(bd_disk, rq, 0);
- err = scsi_req(rq)->result ?
-EIO : 0; - blk_put_request(rq); - - return err; -} - -static inline int blk_send_start_stop(struct request_queue *q, - struct gendisk *bd_disk, int data) -{ - return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); -} - -int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp) -{ -#ifdef CONFIG_COMPAT - if (in_compat_syscall()) { - struct compat_sg_io_hdr hdr32 = { - .interface_id = hdr->interface_id, - .dxfer_direction = hdr->dxfer_direction, - .cmd_len = hdr->cmd_len, - .mx_sb_len = hdr->mx_sb_len, - .iovec_count = hdr->iovec_count, - .dxfer_len = hdr->dxfer_len, - .dxferp = (uintptr_t)hdr->dxferp, - .cmdp = (uintptr_t)hdr->cmdp, - .sbp = (uintptr_t)hdr->sbp, - .timeout = hdr->timeout, - .flags = hdr->flags, - .pack_id = hdr->pack_id, - .usr_ptr = (uintptr_t)hdr->usr_ptr, - .status = hdr->status, - .masked_status = hdr->masked_status, - .msg_status = hdr->msg_status, - .sb_len_wr = hdr->sb_len_wr, - .host_status = hdr->host_status, - .driver_status = hdr->driver_status, - .resid = hdr->resid, - .duration = hdr->duration, - .info = hdr->info, - }; - - if (copy_to_user(argp, &hdr32, sizeof(hdr32))) - return -EFAULT; - - return 0; - } -#endif - - if (copy_to_user(argp, hdr, sizeof(*hdr))) - return -EFAULT; - - return 0; -} -EXPORT_SYMBOL(put_sg_io_hdr); - -int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp) -{ -#ifdef CONFIG_COMPAT - struct compat_sg_io_hdr hdr32; - - if (in_compat_syscall()) { - if (copy_from_user(&hdr32, argp, sizeof(hdr32))) - return -EFAULT; - - *hdr = (struct sg_io_hdr) { - .interface_id = hdr32.interface_id, - .dxfer_direction = hdr32.dxfer_direction, - .cmd_len = hdr32.cmd_len, - .mx_sb_len = hdr32.mx_sb_len, - .iovec_count = hdr32.iovec_count, - .dxfer_len = hdr32.dxfer_len, - .dxferp = compat_ptr(hdr32.dxferp), - .cmdp = compat_ptr(hdr32.cmdp), - .sbp = compat_ptr(hdr32.sbp), - .timeout = hdr32.timeout, - .flags = hdr32.flags, - .pack_id = hdr32.pack_id, - .usr_ptr = compat_ptr(hdr32.usr_ptr), - .status = hdr32.status, - .masked_status = hdr32.masked_status, - .msg_status = hdr32.msg_status, - .sb_len_wr = hdr32.sb_len_wr, - .host_status = hdr32.host_status, - .driver_status = hdr32.driver_status, - .resid = hdr32.resid, - .duration = hdr32.duration, - .info = hdr32.info, - }; - - return 0; - } -#endif - - if (copy_from_user(hdr, argp, sizeof(*hdr))) - return -EFAULT; - - return 0; -} -EXPORT_SYMBOL(get_sg_io_hdr); - -#ifdef CONFIG_COMPAT -struct compat_cdrom_generic_command { - unsigned char cmd[CDROM_PACKET_SIZE]; - compat_caddr_t buffer; - compat_uint_t buflen; - compat_int_t stat; - compat_caddr_t sense; - unsigned char data_direction; - unsigned char pad[3]; - compat_int_t quiet; - compat_int_t timeout; - compat_caddr_t unused; -}; -#endif - -static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc, - const void __user *arg) -{ -#ifdef CONFIG_COMPAT - if (in_compat_syscall()) { - struct compat_cdrom_generic_command cgc32; - - if (copy_from_user(&cgc32, arg, sizeof(cgc32))) - return -EFAULT; - - *cgc = (struct cdrom_generic_command) { - .buffer = compat_ptr(cgc32.buffer), - .buflen = cgc32.buflen, - .stat = cgc32.stat, - .sense = compat_ptr(cgc32.sense), - .data_direction = cgc32.data_direction, - .quiet = cgc32.quiet, - .timeout = cgc32.timeout, - .unused = compat_ptr(cgc32.unused), - }; - memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE); - return 0; - } -#endif - if (copy_from_user(cgc, arg, sizeof(*cgc))) - return -EFAULT; - - return 0; -} - -static int scsi_put_cdrom_generic_arg(const struct 
cdrom_generic_command *cgc, - void __user *arg) -{ -#ifdef CONFIG_COMPAT - if (in_compat_syscall()) { - struct compat_cdrom_generic_command cgc32 = { - .buffer = (uintptr_t)(cgc->buffer), - .buflen = cgc->buflen, - .stat = cgc->stat, - .sense = (uintptr_t)(cgc->sense), - .data_direction = cgc->data_direction, - .quiet = cgc->quiet, - .timeout = cgc->timeout, - .unused = (uintptr_t)(cgc->unused), - }; - memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE); - - if (copy_to_user(arg, &cgc32, sizeof(cgc32))) - return -EFAULT; - - return 0; - } -#endif - if (copy_to_user(arg, cgc, sizeof(*cgc))) - return -EFAULT; - - return 0; -} - -static int scsi_cdrom_send_packet(struct request_queue *q, - struct gendisk *bd_disk, - fmode_t mode, void __user *arg) -{ - struct cdrom_generic_command cgc; - struct sg_io_hdr hdr; - int err; - - err = scsi_get_cdrom_generic_arg(&cgc, arg); - if (err) - return err; - - cgc.timeout = clock_t_to_jiffies(cgc.timeout); - memset(&hdr, 0, sizeof(hdr)); - hdr.interface_id = 'S'; - hdr.cmd_len = sizeof(cgc.cmd); - hdr.dxfer_len = cgc.buflen; - switch (cgc.data_direction) { - case CGC_DATA_UNKNOWN: - hdr.dxfer_direction = SG_DXFER_UNKNOWN; - break; - case CGC_DATA_WRITE: - hdr.dxfer_direction = SG_DXFER_TO_DEV; - break; - case CGC_DATA_READ: - hdr.dxfer_direction = SG_DXFER_FROM_DEV; - break; - case CGC_DATA_NONE: - hdr.dxfer_direction = SG_DXFER_NONE; - break; - default: - return -EINVAL; - } - - hdr.dxferp = cgc.buffer; - hdr.sbp = cgc.sense; - if (hdr.sbp) - hdr.mx_sb_len = sizeof(struct request_sense); - hdr.timeout = jiffies_to_msecs(cgc.timeout); - hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd; - hdr.cmd_len = sizeof(cgc.cmd); - - err = sg_io(q, bd_disk, &hdr, mode); - if (err == -EFAULT) - return -EFAULT; - - if (hdr.status) - return -EIO; - - cgc.stat = err; - cgc.buflen = hdr.resid; - if (scsi_put_cdrom_generic_arg(&cgc, arg)) - return -EFAULT; - - return err; -} - -int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode, - unsigned int cmd, void __user *arg) -{ - int err; - - if (!q) - return -ENXIO; - - switch (cmd) { - /* - * new sgv3 interface - */ - case SG_GET_VERSION_NUM: - err = sg_get_version(arg); - break; - case SCSI_IOCTL_GET_IDLUN: - err = scsi_get_idlun(q, arg); - break; - case SCSI_IOCTL_GET_BUS_NUMBER: - err = scsi_get_bus(q, arg); - break; - case SG_SET_TIMEOUT: - err = sg_set_timeout(q, arg); - break; - case SG_GET_TIMEOUT: - err = sg_get_timeout(q); - break; - case SG_GET_RESERVED_SIZE: - err = sg_get_reserved_size(q, arg); - break; - case SG_SET_RESERVED_SIZE: - err = sg_set_reserved_size(q, arg); - break; - case SG_EMULATED_HOST: - err = sg_emulated_host(q, arg); - break; - case SG_IO: { - struct sg_io_hdr hdr; - - err = get_sg_io_hdr(&hdr, arg); - if (err) - break; - err = sg_io(q, bd_disk, &hdr, mode); - if (err == -EFAULT) - break; - - if (put_sg_io_hdr(&hdr, arg)) - err = -EFAULT; - break; - } - case CDROM_SEND_PACKET: - err = scsi_cdrom_send_packet(q, bd_disk, mode, arg); - break; - - /* - * old junk scsi send command ioctl - */ - case SCSI_IOCTL_SEND_COMMAND: - printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm); - err = -EINVAL; - if (!arg) - break; - - err = sg_scsi_ioctl(q, bd_disk, mode, arg); - break; - case CDROMCLOSETRAY: - err = blk_send_start_stop(q, bd_disk, 0x03); - break; - case CDROMEJECT: - err = blk_send_start_stop(q, bd_disk, 0x02); - break; - default: - err = -ENOTTY; - } - - return err; -} -EXPORT_SYMBOL(scsi_cmd_ioctl); - 
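Besides SG_IO and the packet commands, scsi_cmd_ioctl() above answers several small query ioctls. A sketch probing two of them — again with the device node as an illustrative assumption:

/* Sketch: query ioctls dispatched by scsi_cmd_ioctl() above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	int version = 0, emulated = 0;
	int fd = open("/dev/sdX", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	if (!ioctl(fd, SG_GET_VERSION_NUM, &version) &&
	    !ioctl(fd, SG_EMULATED_HOST, &emulated))
		printf("sg version %d, emulated host %d\n", version, emulated);
	close(fd);
	return 0;
}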
-int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
-{
- if (bd && !bdev_is_partition(bd))
- return 0;
-
- if (capable(CAP_SYS_RAWIO))
- return 0;
-
- return -ENOIOCTLCMD;
-}
-EXPORT_SYMBOL(scsi_verify_blk_ioctl);
-
-int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
- unsigned int cmd, void __user *arg)
-{
- int ret;
-
- ret = scsi_verify_blk_ioctl(bd, cmd);
- if (ret < 0)
- return ret;
-
- return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
-}
-EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
-
-/**
- * scsi_req_init - initialize certain fields of a scsi_request structure
- * @req: Pointer to a scsi_request structure.
- * Initializes .__cmd[], .cmd, .cmd_len and .sense_len but no other members
- * of struct scsi_request.
- */
-void scsi_req_init(struct scsi_request *req)
-{
- memset(req->__cmd, 0, sizeof(req->__cmd));
- req->cmd = req->__cmd;
- req->cmd_len = BLK_MAX_CDB;
- req->sense_len = 0;
-}
-EXPORT_SYMBOL(scsi_req_init);
-
-static int __init blk_scsi_ioctl_init(void)
-{
- blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
- return 0;
-}
-fs_initcall(blk_scsi_ioctl_init);
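Finally, tying back to the new file operations earlier in this diff: blkdev_fallocate() maps FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE to blkdev_issue_zeroout(..., BLKDEV_ZERO_NOFALLBACK), so a plain fallocate(2) call can zero a range of a block device. A sketch under the assumption of a hypothetical device node and a logical-block-aligned 1 MiB range (a misaligned start or length gets -EINVAL):

/* Sketch: zero a block device range via the blkdev_fallocate() path. */
#define _GNU_SOURCE		/* fallocate() */
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/sdX", O_WRONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 1 << 20))
		perror("fallocate");
	close(fd);
	return 0;
}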