Diffstat (limited to 'drivers/block')

-rw-r--r--  drivers/block/loop.c                 |  2
-rw-r--r--  drivers/block/nvme-scsi.c            |  3
-rw-r--r--  drivers/block/rbd.c                  |  5
-rw-r--r--  drivers/block/xen-blkback/blkback.c  | 35
-rw-r--r--  drivers/block/zram/zram_drv.c        | 23

5 files changed, 42 insertions(+), 26 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ae3fcb4199e9..d7173cb1ea76 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1620,8 +1620,8 @@ out:
 
 static void loop_remove(struct loop_device *lo)
 {
-	del_gendisk(lo->lo_disk);
 	blk_cleanup_queue(lo->lo_queue);
+	del_gendisk(lo->lo_disk);
 	blk_mq_free_tag_set(&lo->tag_set);
 	put_disk(lo->lo_disk);
 	kfree(lo);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 6b736b00f63e..88f13c525712 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -944,7 +944,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 					u8 *inq_response, int alloc_len)
 {
-	__be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
+	__be32 max_sectors = cpu_to_be32(
+		nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
 	__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
 	__be32 discard_desc_count = cpu_to_be32(0x100);
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 812523330a78..ec6c5c6e1ac9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
 			result, xferred);
 		if (!img_request->result)
 			img_request->result = result;
+		/*
+		 * Need to end I/O on the entire obj_request worth of
+		 * bytes in case of error.
+		 */
+		xferred = obj_request->length;
 	}
 
 	/* Image object requests don't own their page array */
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bd2b3bbbb22c..713fc9ff1149 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
 	atomic_dec(&blkif->persistent_gnt_in_use);
 }
 
-static void free_persistent_gnts_unmap_callback(int result,
-						struct gntab_unmap_queue_data *data)
-{
-	struct completion *c = data->data;
-
-	/* BUG_ON used to reproduce existing behaviour,
-	   but is this the best way to deal with this? */
-	BUG_ON(result);
-	complete(c);
-}
-
 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                  unsigned int num)
 {
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 	struct rb_node *n;
 	int segs_to_unmap = 0;
 	struct gntab_unmap_queue_data unmap_data;
-	struct completion unmap_completion;
 
-	init_completion(&unmap_completion);
-
-	unmap_data.data = &unmap_completion;
-	unmap_data.done = &free_persistent_gnts_unmap_callback;
 	unmap_data.pages = pages;
 	unmap_data.unmap_ops = unmap;
 	unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 			!rb_next(&persistent_gnt->node)) {
 
 			unmap_data.count = segs_to_unmap;
-			gnttab_unmap_refs_async(&unmap_data);
-			wait_for_completion(&unmap_completion);
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct persistent_gnt *persistent_gnt;
-	int ret, segs_to_unmap = 0;
+	int segs_to_unmap = 0;
 	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+	struct gntab_unmap_queue_data unmap_data;
+
+	unmap_data.pages = pages;
+	unmap_data.unmap_ops = unmap;
+	unmap_data.kunmap_ops = NULL;
 
 	while(!list_empty(&blkif->persistent_purge_list)) {
 		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
-			BUG_ON(ret);
+			unmap_data.count = segs_to_unmap;
+			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
 		}
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
-		BUG_ON(ret);
+		unmap_data.count = segs_to_unmap;
+		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
 }
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c94386aa563d..8dcbced0eafd 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -74,6 +74,27 @@ static inline struct zram *dev_to_zram(struct device *dev)
 	return (struct zram *)dev_to_disk(dev)->private_data;
 }
 
+static ssize_t compact_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	unsigned long nr_migrated;
+	struct zram *zram = dev_to_zram(dev);
+	struct zram_meta *meta;
+
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		up_read(&zram->init_lock);
+		return -EINVAL;
+	}
+
+	meta = zram->meta;
+	nr_migrated = zs_compact(meta->mem_pool);
+	atomic64_add(nr_migrated, &zram->stats.num_migrated);
+	up_read(&zram->init_lock);
+
+	return len;
+}
+
 static ssize_t disksize_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -1038,6 +1059,7 @@ static const struct block_device_operations zram_devops = {
 	.owner = THIS_MODULE
 };
 
+static DEVICE_ATTR_WO(compact);
 static DEVICE_ATTR_RW(disksize);
 static DEVICE_ATTR_RO(initstate);
 static DEVICE_ATTR_WO(reset);
@@ -1114,6 +1136,7 @@ static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_num_writes.attr,
 	&dev_attr_failed_reads.attr,
 	&dev_attr_failed_writes.attr,
+	&dev_attr_compact.attr,
 	&dev_attr_invalid_io.attr,
 	&dev_attr_notify_free.attr,
 	&dev_attr_zero_pages.attr,
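
Side note on the nvme-scsi.c hunk: the fix reports the Block Limits VPD maximum in namespace
logical blocks rather than raw 512-byte sectors by wrapping the value in nvme_block_nr(). The
macro body below is recalled from kernels of this era and is an assumption, not part of this
commit; it is shown only to illustrate the unit conversion:

	#include <stdio.h>

	/* Assumed shape of nvme_block_nr(): convert 512-byte sectors into
	 * LBA-sized logical blocks using the namespace's lba_shift. */
	static unsigned long nvme_block_nr(unsigned int lba_shift, unsigned long sectors)
	{
		return sectors >> (lba_shift - 9);
	}

	int main(void)
	{
		/* Example: 4 KiB logical blocks (lba_shift = 12), 1024 hw sectors. */
		printf("%lu\n", nvme_block_nr(12, 1024));	/* prints 128 */
		return 0;
	}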
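
Usage note on the new zram attribute (an assumption drawn from the diff, not text from the
commit): with DEVICE_ATTR_WO(compact) registered in zram_disk_attrs, writing any value to
/sys/block/zram<id>/compact should trigger zs_compact() for that device; compact_store()
ignores the bytes written. A minimal user-space sketch, assuming a device named zram0:

	/* Hypothetical trigger for the write-only compact attribute; the
	 * device name zram0 is an assumption for illustration. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/block/zram0/compact", O_WRONLY);

		if (fd < 0) {
			perror("open /sys/block/zram0/compact");
			return 1;
		}
		if (write(fd, "1", 1) != 1) {
			perror("write");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}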