Diffstat (limited to 'drivers')
54 files changed, 342 insertions, 363 deletions
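The series below follows one mechanical pattern across all of these drivers: ->bi_end_io handlers lose their `int error` argument, tests of the BIO_UPTODATE flag become checks of the new `bio->bi_error` field, and completion paths store the error in `bi_error` (or call `bio_io_error()`) before calling the single-argument `bio_endio()`. A minimal sketch of that pattern, using hypothetical handler names and assuming a kernel tree that already carries the `bi_error` conversion, not code from the patch itself:

```c
#include <linux/bio.h>
#include <linux/printk.h>

/*
 * Old style (pre-conversion), shown only as a comment:
 *
 *	static void example_endio(struct bio *bio, int error)
 *	{
 *		if (!error && !bio_flagged(bio, BIO_UPTODATE))
 *			error = -EIO;
 *		...
 *	}
 */

/* New style: completion status lives in bio->bi_error. */
static void example_endio(struct bio *bio)
{
	if (bio->bi_error)
		pr_warn("example: I/O error %d\n", bio->bi_error);
	/* per-driver completion handling would go here */
	bio_put(bio);
}

/* Completing a bio now means setting bi_error, then calling bio_endio(). */
static void example_complete(struct bio *bio, int err)
{
	bio->bi_error = err;	/* 0 on success */
	bio_endio(bio);		/* or bio_io_error(bio) for a plain -EIO */
}
```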
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 422b7d84f686..ad80c85e0857 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -1110,7 +1110,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) d->ip.rq = NULL; do { bio = rq->bio; - bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); + bok = !fastfail && !bio->bi_error; } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); /* cf. http://lkml.org/lkml/2006/10/31/28 */ @@ -1172,7 +1172,7 @@ ktiocomplete(struct frame *f) ahout->cmdstat, ahin->cmdstat, d->aoemajor, d->aoeminor); noskb: if (buf) - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; goto out; } @@ -1185,7 +1185,7 @@ noskb: if (buf) "aoe: runt data size in read from", (long) d->aoemajor, d->aoeminor, skb->len, n); - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; break; } if (n > f->iter.bi_size) { @@ -1193,7 +1193,7 @@ noskb: if (buf) "aoe: too-large data size in read from", (long) d->aoemajor, d->aoeminor, n, f->iter.bi_size); - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; break; } bvcpy(skb, f->buf->bio, f->iter, n); @@ -1695,7 +1695,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf) if (buf == NULL) return; buf->iter.bi_size = 0; - clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + buf->bio->bi_error = -EIO; if (buf->nframesout == 0) aoe_end_buf(d, buf); } diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index e774c50b6842..ffd1947500c6 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d) if (rq == NULL) return; while ((bio = d->ip.nxbio)) { - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; d->ip.nxbio = bio->bi_next; n = (unsigned long) rq->special; rq->special = (void *) --n; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index e573e470bd8a..f9ab74505e69 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -331,14 +331,12 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) struct bio_vec bvec; sector_t sector; struct bvec_iter iter; - int err = -EIO; sector = bio->bi_iter.bi_sector; if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) - goto out; + goto io_error; if (unlikely(bio->bi_rw & REQ_DISCARD)) { - err = 0; discard_from_brd(brd, sector, bio->bi_iter.bi_size); goto out; } @@ -349,15 +347,20 @@ static void brd_make_request(struct request_queue *q, struct bio *bio) bio_for_each_segment(bvec, bio, iter) { unsigned int len = bvec.bv_len; + int err; + err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, rw, sector); if (err) - break; + goto io_error; sector += len >> SECTOR_SHIFT; } out: - bio_endio(bio, err); + bio_endio(bio); + return; +io_error: + bio_io_error(bio); } static int brd_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 1318e3217cb0..b3868e7a1ffd 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -175,11 +175,11 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ device->md_io.submit_jif = jiffies; if (drbd_insert_fault(device, (rw & WRITE) ? 
DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) - bio_endio(bio, -EIO); + bio_io_error(bio); else submit_bio(rw, bio); wait_until_done_or_force_detached(device, bdev, &device->md_io.done); - if (bio_flagged(bio, BIO_UPTODATE)) + if (!bio->bi_error) err = device->md_io.error; out: diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 434c77dcc99e..e5e0f19ceda0 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -941,36 +941,27 @@ static void drbd_bm_aio_ctx_destroy(struct kref *kref) } /* bv_page may be a copy, or may be the original */ -static void drbd_bm_endio(struct bio *bio, int error) +static void drbd_bm_endio(struct bio *bio) { struct drbd_bm_aio_ctx *ctx = bio->bi_private; struct drbd_device *device = ctx->device; struct drbd_bitmap *b = device->bitmap; unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); - int uptodate = bio_flagged(bio, BIO_UPTODATE); - - - /* strange behavior of some lower level drivers... - * fail the request by clearing the uptodate flag, - * but do not return any error?! - * do we want to WARN() on this? */ - if (!error && !uptodate) - error = -EIO; if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && !bm_test_page_unchanged(b->bm_pages[idx])) drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx); - if (error) { + if (bio->bi_error) { /* ctx error will hold the completed-last non-zero error code, * in case error codes differ. */ - ctx->error = error; + ctx->error = bio->bi_error; bm_set_page_io_err(b->bm_pages[idx]); /* Not identical to on disk version of it. * Is BM_PAGE_IO_ERROR enough? */ if (__ratelimit(&drbd_ratelimit_state)) drbd_err(device, "IO ERROR %d on bitmap page idx %u\n", - error, idx); + bio->bi_error, idx); } else { bm_clear_page_io_err(b->bm_pages[idx]); dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx); @@ -1031,7 +1022,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho if (drbd_insert_fault(device, (rw & WRITE) ? 
DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio->bi_rw |= rw; - bio_endio(bio, -EIO); + bio_io_error(bio); } else { submit_bio(rw, bio); /* this should not count as user activity and cause the diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index efd19c2da9c2..a08c4a9179f1 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1481,9 +1481,9 @@ extern int drbd_khelper(struct drbd_device *device, char *cmd); /* drbd_worker.c */ /* bi_end_io handlers */ -extern void drbd_md_endio(struct bio *bio, int error); -extern void drbd_peer_request_endio(struct bio *bio, int error); -extern void drbd_request_endio(struct bio *bio, int error); +extern void drbd_md_endio(struct bio *bio); +extern void drbd_peer_request_endio(struct bio *bio); +extern void drbd_request_endio(struct bio *bio); extern int drbd_worker(struct drbd_thread *thi); enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor); void drbd_resync_after_changed(struct drbd_device *device); @@ -1604,12 +1604,13 @@ static inline void drbd_generic_make_request(struct drbd_device *device, __release(local); if (!bio->bi_bdev) { drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); - bio_endio(bio, -ENODEV); + bio->bi_error = -ENODEV; + bio_endio(bio); return; } if (drbd_insert_fault(device, fault_type)) - bio_endio(bio, -EIO); + bio_io_error(bio); else generic_make_request(bio); } diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3907202fb9d9..9cb41166366e 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -201,7 +201,8 @@ void start_new_tl_epoch(struct drbd_connection *connection) void complete_master_bio(struct drbd_device *device, struct bio_and_error *m) { - bio_endio(m->bio, m->error); + m->bio->bi_error = m->error; + bio_endio(m->bio); dec_ap_bio(device); } @@ -1153,12 +1154,12 @@ drbd_submit_req_private_bio(struct drbd_request *req) rw == WRITE ? DRBD_FAULT_DT_WR : rw == READ ? DRBD_FAULT_DT_RD : DRBD_FAULT_DT_RA)) - bio_endio(bio, -EIO); + bio_io_error(bio); else generic_make_request(bio); put_ldev(device); } else - bio_endio(bio, -EIO); + bio_io_error(bio); } static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) @@ -1191,7 +1192,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long /* only pass the error to the upper layers. * if user cannot handle io errors, that's not our business. */ drbd_err(device, "could not kmalloc() req\n"); - bio_endio(bio, -ENOMEM); + bio->bi_error = -ENOMEM; + bio_endio(bio); return ERR_PTR(-ENOMEM); } req->start_jif = start_jif; diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index d0fae55d871d..5578c1477ba6 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -65,12 +65,12 @@ rwlock_t global_state_lock; /* used for synchronous meta data and bitmap IO * submitted by drbd_md_sync_page_io() */ -void drbd_md_endio(struct bio *bio, int error) +void drbd_md_endio(struct bio *bio) { struct drbd_device *device; device = bio->bi_private; - device->md_io.error = error; + device->md_io.error = bio->bi_error; /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able * to timeout on the lower level device, and eventually detach from it. @@ -170,31 +170,20 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l /* writes on behalf of the partner, or resync writes, * "submitted" by the receiver. 
*/ -void drbd_peer_request_endio(struct bio *bio, int error) +void drbd_peer_request_endio(struct bio *bio) { struct drbd_peer_request *peer_req = bio->bi_private; struct drbd_device *device = peer_req->peer_device->device; - int uptodate = bio_flagged(bio, BIO_UPTODATE); int is_write = bio_data_dir(bio) == WRITE; int is_discard = !!(bio->bi_rw & REQ_DISCARD); - if (error && __ratelimit(&drbd_ratelimit_state)) + if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) drbd_warn(device, "%s: error=%d s=%llus\n", is_write ? (is_discard ? "discard" : "write") - : "read", error, + : "read", bio->bi_error, (unsigned long long)peer_req->i.sector); - if (!error && !uptodate) { - if (__ratelimit(&drbd_ratelimit_state)) - drbd_warn(device, "%s: setting error to -EIO s=%llus\n", - is_write ? "write" : "read", - (unsigned long long)peer_req->i.sector); - /* strange behavior of some lower level drivers... - * fail the request by clearing the uptodate flag, - * but do not return any error?! */ - error = -EIO; - } - if (error) + if (bio->bi_error) set_bit(__EE_WAS_ERROR, &peer_req->flags); bio_put(bio); /* no need for the bio anymore */ @@ -208,24 +197,13 @@ void drbd_peer_request_endio(struct bio *bio, int error) /* read, readA or write requests on R_PRIMARY coming from drbd_make_request */ -void drbd_request_endio(struct bio *bio, int error) +void drbd_request_endio(struct bio *bio) { unsigned long flags; struct drbd_request *req = bio->bi_private; struct drbd_device *device = req->device; struct bio_and_error m; enum drbd_req_event what; - int uptodate = bio_flagged(bio, BIO_UPTODATE); - - if (!error && !uptodate) { - drbd_warn(device, "p %s: setting error to -EIO\n", - bio_data_dir(bio) == WRITE ? "write" : "read"); - /* strange behavior of some lower level drivers... - * fail the request by clearing the uptodate flag, - * but do not return any error?! */ - error = -EIO; - } - /* If this request was aborted locally before, * but now was completed "successfully", @@ -259,14 +237,14 @@ void drbd_request_endio(struct bio *bio, int error) if (__ratelimit(&drbd_ratelimit_state)) drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); - if (!error) + if (!bio->bi_error) panic("possible random memory corruption caused by delayed completion of aborted local request\n"); } /* to avoid recursion in __req_mod */ - if (unlikely(error)) { + if (unlikely(bio->bi_error)) { if (bio->bi_rw & REQ_DISCARD) - what = (error == -EOPNOTSUPP) + what = (bio->bi_error == -EOPNOTSUPP) ? DISCARD_COMPLETED_NOTSUPP : DISCARD_COMPLETED_WITH_ERROR; else @@ -279,7 +257,7 @@ void drbd_request_endio(struct bio *bio, int error) what = COMPLETED_OK; bio_put(req->private_bio); - req->private_bio = ERR_PTR(error); + req->private_bio = ERR_PTR(bio->bi_error); /* not req_mod(), we need irqsave here! 
*/ spin_lock_irqsave(&device->resource->req_lock, flags); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index a08cda955285..331363e7de0f 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3771,13 +3771,14 @@ struct rb0_cbdata { struct completion complete; }; -static void floppy_rb0_cb(struct bio *bio, int err) +static void floppy_rb0_cb(struct bio *bio) { struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; int drive = cbdata->drive; - if (err) { - pr_info("floppy: error %d while reading block 0\n", err); + if (bio->bi_error) { + pr_info("floppy: error %d while reading block 0\n", + bio->bi_error); set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); } complete(&cbdata->complete); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 69de41a87b74..016a59afcf24 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -222,7 +222,7 @@ static void end_cmd(struct nullb_cmd *cmd) blk_end_request_all(cmd->rq, 0); break; case NULL_Q_BIO: - bio_endio(cmd->bio, 0); + bio_endio(cmd->bio); break; } diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 4c20c228184c..a7a259e031da 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -977,7 +977,7 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec) } } -static void pkt_end_io_read(struct bio *bio, int err) +static void pkt_end_io_read(struct bio *bio) { struct packet_data *pkt = bio->bi_private; struct pktcdvd_device *pd = pkt->pd; @@ -985,9 +985,9 @@ static void pkt_end_io_read(struct bio *bio, int err) pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", bio, (unsigned long long)pkt->sector, - (unsigned long long)bio->bi_iter.bi_sector, err); + (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error); - if (err) + if (bio->bi_error) atomic_inc(&pkt->io_errors); if (atomic_dec_and_test(&pkt->io_wait)) { atomic_inc(&pkt->run_sm); @@ -996,13 +996,13 @@ static void pkt_end_io_read(struct bio *bio, int err) pkt_bio_finished(pd); } -static void pkt_end_io_packet_write(struct bio *bio, int err) +static void pkt_end_io_packet_write(struct bio *bio) { struct packet_data *pkt = bio->bi_private; struct pktcdvd_device *pd = pkt->pd; BUG_ON(!pd); - pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err); + pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error); pd->stats.pkt_ended++; @@ -1340,22 +1340,22 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) pkt_queue_bio(pd, pkt->w_bio); } -static void pkt_finish_packet(struct packet_data *pkt, int uptodate) +static void pkt_finish_packet(struct packet_data *pkt, int error) { struct bio *bio; - if (!uptodate) + if (error) pkt->cache_valid = 0; /* Finish all bios corresponding to this packet */ - while ((bio = bio_list_pop(&pkt->orig_bios))) - bio_endio(bio, uptodate ? 
0 : -EIO); + while ((bio = bio_list_pop(&pkt->orig_bios))) { + bio->bi_error = error; + bio_endio(bio); + } } static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) { - int uptodate; - pkt_dbg(2, pd, "pkt %d\n", pkt->id); for (;;) { @@ -1384,7 +1384,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data if (atomic_read(&pkt->io_wait) > 0) return; - if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) { + if (!pkt->w_bio->bi_error) { pkt_set_state(pkt, PACKET_FINISHED_STATE); } else { pkt_set_state(pkt, PACKET_RECOVERY_STATE); @@ -1401,8 +1401,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data break; case PACKET_FINISHED_STATE: - uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags); - pkt_finish_packet(pkt, uptodate); + pkt_finish_packet(pkt, pkt->w_bio->bi_error); return; default: @@ -2332,13 +2331,14 @@ static void pkt_close(struct gendisk *disk, fmode_t mode) } -static void pkt_end_io_read_cloned(struct bio *bio, int err) +static void pkt_end_io_read_cloned(struct bio *bio) { struct packet_stacked_data *psd = bio->bi_private; struct pktcdvd_device *pd = psd->pd; + psd->bio->bi_error = bio->bi_error; bio_put(bio); - bio_endio(psd->bio, err); + bio_endio(psd->bio); mempool_free(psd, psd_pool); pkt_bio_finished(pd); } diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index b1612eb16172..49b4706b162c 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -593,7 +593,8 @@ out: next = bio_list_peek(&priv->list); spin_unlock_irq(&priv->lock); - bio_endio(bio, error); + bio->bi_error = error; + bio_endio(bio); return next; } diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index ac8c62cb4875..63b9d2ffa8ee 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -137,7 +137,10 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card, if (!card->eeh_state && card->gendisk) disk_stats_complete(card, meta->bio, meta->start_time); - bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); + if (atomic_read(&meta->error)) + bio_io_error(meta->bio); + else + bio_endio(meta->bio); kmem_cache_free(bio_meta_pool, meta); } } @@ -199,7 +202,9 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio) queue_err: kmem_cache_free(bio_meta_pool, bio_meta); req_err: - bio_endio(bio, st); + if (st) + bio->bi_error = st; + bio_endio(bio); } /*----------------- Device Setup -------------------*/ diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 4cf81b5bf0f7..3b3afd2ec5d6 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -456,7 +456,7 @@ static void process_page(unsigned long data) PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (control & DMASCR_HARD_ERROR) { /* error */ - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; dev_printk(KERN_WARNING, &card->dev->dev, "I/O error on sector %d/%d\n", le32_to_cpu(desc->local_addr)>>9, @@ -505,7 +505,7 @@ static void process_page(unsigned long data) return_bio = bio->bi_next; bio->bi_next = NULL; - bio_endio(bio, 0); + bio_endio(bio); } } diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index ced96777b677..662648e08596 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1078,9 +1078,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) /* * bio callback. 
*/ -static void end_block_io_op(struct bio *bio, int error) +static void end_block_io_op(struct bio *bio) { - __end_block_io_op(bio->bi_private, error); + __end_block_io_op(bio->bi_private, bio->bi_error); bio_put(bio); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 6d89ed35d80c..d542db7a6c73 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -82,7 +82,6 @@ struct blk_shadow { struct split_bio { struct bio *bio; atomic_t pending; - int err; }; static DEFINE_MUTEX(blkfront_mutex); @@ -1478,16 +1477,14 @@ static int blkfront_probe(struct xenbus_device *dev, return 0; } -static void split_bio_end(struct bio *bio, int error) +static void split_bio_end(struct bio *bio) { struct split_bio *split_bio = bio->bi_private; - if (error) - split_bio->err = error; - if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; - bio_endio(split_bio->bio, split_bio->err); + split_bio->bio->bi_error = bio->bi_error; + bio_endio(split_bio->bio); kfree(split_bio); } bio_put(bio); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f439ad2800da..68c3d4800464 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -850,7 +850,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) if (unlikely(bio->bi_rw & REQ_DISCARD)) { zram_bio_discard(zram, index, offset, bio); - bio_endio(bio, 0); + bio_endio(bio); return; } @@ -883,8 +883,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) update_position(&index, &offset, &bvec); } - set_bit(BIO_UPTODATE, &bio->bi_flags); - bio_endio(bio, 0); + bio_endio(bio); return; out: diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 00cde40db572..83392f856dfd 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -278,7 +278,7 @@ err: goto out; } -static void btree_node_read_endio(struct bio *bio, int error) +static void btree_node_read_endio(struct bio *bio) { struct closure *cl = bio->bi_private; closure_put(cl); @@ -305,7 +305,7 @@ static void bch_btree_node_read(struct btree *b) bch_submit_bbio(bio, b->c, &b->key, 0); closure_sync(&cl); - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (bio->bi_error) set_btree_node_io_error(b); bch_bbio_free(bio, b->c); @@ -371,15 +371,15 @@ static void btree_node_write_done(struct closure *cl) __btree_node_write_done(cl); } -static void btree_node_write_endio(struct bio *bio, int error) +static void btree_node_write_endio(struct bio *bio) { struct closure *cl = bio->bi_private; struct btree *b = container_of(cl, struct btree, io); - if (error) + if (bio->bi_error) set_btree_node_io_error(b); - bch_bbio_count_io_errors(b->c, bio, error, "writing btree"); + bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree"); closure_put(cl); } diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 79a6d63e8ed3..782cc2c8a185 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -38,7 +38,7 @@ * they are running owned by the thread that is running them. 
Otherwise, suppose * you submit some bios and wish to have a function run when they all complete: * - * foo_endio(struct bio *bio, int error) + * foo_endio(struct bio *bio) * { * closure_put(cl); * } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index bf6a9ca18403..9440df94bc83 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -55,19 +55,19 @@ static void bch_bio_submit_split_done(struct closure *cl) s->bio->bi_end_io = s->bi_end_io; s->bio->bi_private = s->bi_private; - bio_endio(s->bio, 0); + bio_endio(s->bio); closure_debug_destroy(&s->cl); mempool_free(s, s->p->bio_split_hook); } -static void bch_bio_submit_split_endio(struct bio *bio, int error) +static void bch_bio_submit_split_endio(struct bio *bio) { struct closure *cl = bio->bi_private; struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl); - if (error) - clear_bit(BIO_UPTODATE, &s->bio->bi_flags); + if (bio->bi_error) + s->bio->bi_error = bio->bi_error; bio_put(bio); closure_put(cl); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 418607a6ba33..d6a4e16030a6 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -24,7 +24,7 @@ * bit. */ -static void journal_read_endio(struct bio *bio, int error) +static void journal_read_endio(struct bio *bio) { struct closure *cl = bio->bi_private; closure_put(cl); @@ -401,7 +401,7 @@ retry: #define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1) -static void journal_discard_endio(struct bio *bio, int error) +static void journal_discard_endio(struct bio *bio) { struct journal_device *ja = container_of(bio, struct journal_device, discard_bio); @@ -547,11 +547,11 @@ void bch_journal_next(struct journal *j) pr_debug("journal_pin full (%zu)", fifo_used(&j->pin)); } -static void journal_write_endio(struct bio *bio, int error) +static void journal_write_endio(struct bio *bio) { struct journal_write *w = bio->bi_private; - cache_set_err_on(error, w->c, "journal io error"); + cache_set_err_on(bio->bi_error, w->c, "journal io error"); closure_put(&w->c->journal.io); } diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index cd7490311e51..b929fc944e9c 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -60,20 +60,20 @@ static void write_moving_finish(struct closure *cl) closure_return_with_destructor(cl, moving_io_destructor); } -static void read_moving_endio(struct bio *bio, int error) +static void read_moving_endio(struct bio *bio) { struct bbio *b = container_of(bio, struct bbio, bio); struct moving_io *io = container_of(bio->bi_private, struct moving_io, cl); - if (error) - io->op.error = error; + if (bio->bi_error) + io->op.error = bio->bi_error; else if (!KEY_DIRTY(&b->key) && ptr_stale(io->op.c, &b->key, 0)) { io->op.error = -EINTR; } - bch_bbio_endio(io->op.c, bio, error, "reading data to move"); + bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move"); } static void moving_init(struct moving_io *io) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index f292790997d7..a09b9462ff49 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -173,22 +173,22 @@ static void bch_data_insert_error(struct closure *cl) bch_data_insert_keys(cl); } -static void bch_data_insert_endio(struct bio *bio, int error) +static void bch_data_insert_endio(struct bio *bio) { struct closure *cl = bio->bi_private; struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); - if (error) { + if 
(bio->bi_error) { /* TODO: We could try to recover from this. */ if (op->writeback) - op->error = error; + op->error = bio->bi_error; else if (!op->replace) set_closure_fn(cl, bch_data_insert_error, op->wq); else set_closure_fn(cl, NULL, NULL); } - bch_bbio_endio(op->c, bio, error, "writing data to cache"); + bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache"); } static void bch_data_insert_start(struct closure *cl) @@ -477,7 +477,7 @@ struct search { struct data_insert_op iop; }; -static void bch_cache_read_endio(struct bio *bio, int error) +static void bch_cache_read_endio(struct bio *bio) { struct bbio *b = container_of(bio, struct bbio, bio); struct closure *cl = bio->bi_private; @@ -490,15 +490,15 @@ static void bch_cache_read_endio(struct bio *bio, int error) * from the backing device. */ - if (error) - s->iop.error = error; + if (bio->bi_error) + s->iop.error = bio->bi_error; else if (!KEY_DIRTY(&b->key) && ptr_stale(s->iop.c, &b->key, 0)) { atomic_long_inc(&s->iop.c->cache_read_races); s->iop.error = -EINTR; } - bch_bbio_endio(s->iop.c, bio, error, "reading from cache"); + bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache"); } /* @@ -591,13 +591,13 @@ static void cache_lookup(struct closure *cl) /* Common code for the make_request functions */ -static void request_endio(struct bio *bio, int error) +static void request_endio(struct bio *bio) { struct closure *cl = bio->bi_private; - if (error) { + if (bio->bi_error) { struct search *s = container_of(cl, struct search, cl); - s->iop.error = error; + s->iop.error = bio->bi_error; /* Only cache read errors are recoverable */ s->recoverable = false; } @@ -613,7 +613,8 @@ static void bio_complete(struct search *s) &s->d->disk->part0, s->start_time); trace_bcache_request_end(s->d, s->orig_bio); - bio_endio(s->orig_bio, s->iop.error); + s->orig_bio->bi_error = s->iop.error; + bio_endio(s->orig_bio); s->orig_bio = NULL; } } @@ -992,7 +993,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio) } else { if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(dc->bdev))) - bio_endio(bio, 0); + bio_endio(bio); else bch_generic_make_request(bio, &d->bio_split_hook); } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index fc8e545ced18..be01fd3c87f1 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -221,7 +221,7 @@ err: return err; } -static void write_bdev_super_endio(struct bio *bio, int error) +static void write_bdev_super_endio(struct bio *bio) { struct cached_dev *dc = bio->bi_private; /* XXX: error checking */ @@ -290,11 +290,11 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) closure_return_with_destructor(cl, bch_write_bdev_super_unlock); } -static void write_super_endio(struct bio *bio, int error) +static void write_super_endio(struct bio *bio) { struct cache *ca = bio->bi_private; - bch_count_io_errors(ca, error, "writing superblock"); + bch_count_io_errors(ca, bio->bi_error, "writing superblock"); closure_put(&ca->set->sb_write); } @@ -339,12 +339,12 @@ void bcache_write_super(struct cache_set *c) /* UUID io */ -static void uuid_endio(struct bio *bio, int error) +static void uuid_endio(struct bio *bio) { struct closure *cl = bio->bi_private; struct cache_set *c = container_of(cl, struct cache_set, uuid_write); - cache_set_err_on(error, c, "accessing uuids"); + cache_set_err_on(bio->bi_error, c, "accessing uuids"); bch_bbio_free(bio, c); closure_put(cl); } @@ -512,11 +512,11 @@ static struct 
uuid_entry *uuid_find_empty(struct cache_set *c) * disk. */ -static void prio_endio(struct bio *bio, int error) +static void prio_endio(struct bio *bio) { struct cache *ca = bio->bi_private; - cache_set_err_on(error, ca->set, "accessing priorities"); + cache_set_err_on(bio->bi_error, ca->set, "accessing priorities"); bch_bbio_free(bio, ca->set); closure_put(&ca->prio); } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index f1986bcd1bf0..b4fc874c30fd 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -166,12 +166,12 @@ static void write_dirty_finish(struct closure *cl) closure_return_with_destructor(cl, dirty_io_destructor); } -static void dirty_endio(struct bio *bio, int error) +static void dirty_endio(struct bio *bio) { struct keybuf_key *w = bio->bi_private; struct dirty_io *io = w->private; - if (error) + if (bio->bi_error) SET_KEY_DIRTY(&w->key, false); closure_put(&io->cl); @@ -193,15 +193,15 @@ static void write_dirty(struct closure *cl) continue_at(cl, write_dirty_finish, system_wq); } -static void read_dirty_endio(struct bio *bio, int error) +static void read_dirty_endio(struct bio *bio) { struct keybuf_key *w = bio->bi_private; struct dirty_io *io = w->private; bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), - error, "reading dirty data from cache"); + bio->bi_error, "reading dirty data from cache"); - dirty_endio(bio, error); + dirty_endio(bio); } static void read_dirty_submit(struct closure *cl) diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c index cd6d1d21e057..03af174485d3 100644 --- a/drivers/md/dm-bio-prison.c +++ b/drivers/md/dm-bio-prison.c @@ -236,8 +236,10 @@ void dm_cell_error(struct dm_bio_prison *prison, bio_list_init(&bios); dm_cell_release(prison, cell, &bios); - while ((bio = bio_list_pop(&bios))) - bio_endio(bio, error); + while ((bio = bio_list_pop(&bios))) { + bio->bi_error = error; + bio_endio(bio); + } } EXPORT_SYMBOL_GPL(dm_cell_error); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 86dbbc737402..83cc52eaf56d 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -545,7 +545,8 @@ static void dmio_complete(unsigned long error, void *context) { struct dm_buffer *b = context; - b->bio.bi_end_io(&b->bio, error ? -EIO : 0); + b->bio.bi_error = error ? -EIO : 0; + b->bio.bi_end_io(&b->bio); } static void use_dmio(struct dm_buffer *b, int rw, sector_t block, @@ -575,13 +576,16 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block, b->bio.bi_end_io = end_io; r = dm_io(&io_req, 1, ®ion, NULL); - if (r) - end_io(&b->bio, r); + if (r) { + b->bio.bi_error = r; + end_io(&b->bio); + } } -static void inline_endio(struct bio *bio, int error) +static void inline_endio(struct bio *bio) { bio_end_io_t *end_fn = bio->bi_private; + int error = bio->bi_error; /* * Reset the bio to free any attached resources @@ -589,7 +593,8 @@ static void inline_endio(struct bio *bio, int error) */ bio_reset(bio); - end_fn(bio, error); + bio->bi_error = error; + end_fn(bio); } static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, @@ -661,13 +666,14 @@ static void submit_io(struct dm_buffer *b, int rw, sector_t block, * Set the error, clear B_WRITING bit and wake anyone who was waiting on * it. 
*/ -static void write_endio(struct bio *bio, int error) +static void write_endio(struct bio *bio) { struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); - b->write_error = error; - if (unlikely(error)) { + b->write_error = bio->bi_error; + if (unlikely(bio->bi_error)) { struct dm_bufio_client *c = b->c; + int error = bio->bi_error; (void)cmpxchg(&c->async_write_error, 0, error); } @@ -1026,11 +1032,11 @@ found_buffer: * The endio routine for reading: set the error, clear the bit and wake up * anyone waiting on the buffer. */ -static void read_endio(struct bio *bio, int error) +static void read_endio(struct bio *bio) { struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); - b->read_error = error; + b->read_error = bio->bi_error; BUG_ON(!test_bit(B_READING, &b->state)); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1b4e1756b169..04d0dadc48b1 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -919,14 +919,14 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio) wake_worker(cache); } -static void writethrough_endio(struct bio *bio, int err) +static void writethrough_endio(struct bio *bio) { struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); dm_unhook_bio(&pb->hook_info, bio); - if (err) { - bio_endio(bio, err); + if (bio->bi_error) { + bio_endio(bio); return; } @@ -1231,7 +1231,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) * The block was promoted via an overwrite, so it's dirty. */ set_dirty(cache, mg->new_oblock, mg->cblock); - bio_endio(mg->new_ocell->holder, 0); + bio_endio(mg->new_ocell->holder); cell_defer(cache, mg->new_ocell, false); } free_io_migration(mg); @@ -1284,7 +1284,7 @@ static void issue_copy(struct dm_cache_migration *mg) } } -static void overwrite_endio(struct bio *bio, int err) +static void overwrite_endio(struct bio *bio) { struct dm_cache_migration *mg = bio->bi_private; struct cache *cache = mg->cache; @@ -1294,7 +1294,7 @@ static void overwrite_endio(struct bio *bio, int err) dm_unhook_bio(&pb->hook_info, bio); - if (err) + if (bio->bi_error) mg->err = true; mg->requeue_holder = false; @@ -1358,7 +1358,7 @@ static void issue_discard(struct dm_cache_migration *mg) b = to_dblock(from_dblock(b) + 1); } - bio_endio(bio, 0); + bio_endio(bio); cell_defer(mg->cache, mg->new_ocell, false); free_migration(mg); } @@ -1631,7 +1631,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs, calc_discard_block_range(cache, bio, &b, &e); if (b == e) { - bio_endio(bio, 0); + bio_endio(bio); return; } @@ -2213,8 +2213,10 @@ static void requeue_deferred_bios(struct cache *cache) bio_list_merge(&bios, &cache->deferred_bios); bio_list_init(&cache->deferred_bios); - while ((bio = bio_list_pop(&bios))) - bio_endio(bio, DM_ENDIO_REQUEUE); + while ((bio = bio_list_pop(&bios))) { + bio->bi_error = DM_ENDIO_REQUEUE; + bio_endio(bio); + } } static int more_work(struct cache *cache) @@ -3119,7 +3121,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) * This is a duplicate writethrough io that is no * longer needed because the block has been demoted. 
*/ - bio_endio(bio, 0); + bio_endio(bio); // FIXME: remap everything as a miss cell_defer(cache, cell, false); r = DM_MAPIO_SUBMITTED; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 0f48fed44a17..744b80c608e5 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1076,7 +1076,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (io->ctx.req) crypt_free_req(cc, io->ctx.req, base_bio); - bio_endio(base_bio, error); + base_bio->bi_error = error; + bio_endio(base_bio); } /* @@ -1096,15 +1097,12 @@ static void crypt_dec_pending(struct dm_crypt_io *io) * The work is done per CPU global for all dm-crypt instances. * They should not depend on each other and do not block. */ -static void crypt_endio(struct bio *clone, int error) +static void crypt_endio(struct bio *clone) { struct dm_crypt_io *io = clone->bi_private; struct crypt_config *cc = io->cc; unsigned rw = bio_data_dir(clone); - if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) - error = -EIO; - /* * free the processed pages */ @@ -1113,13 +1111,13 @@ static void crypt_endio(struct bio *clone, int error) bio_put(clone); - if (rw == READ && !error) { + if (rw == READ && !clone->bi_error) { kcryptd_queue_crypt(io); return; } - if (unlikely(error)) - io->error = error; + if (unlikely(clone->bi_error)) + io->error = clone->bi_error; crypt_dec_pending(io); } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index b257e46876d3..04481247aab8 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -296,7 +296,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) * Drop writes? */ if (test_bit(DROP_WRITES, &fc->flags)) { - bio_endio(bio, 0); + bio_endio(bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 74adcd2c967e..efc6659f9d6a 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -134,12 +134,12 @@ static void dec_count(struct io *io, unsigned int region, int error) complete_io(io); } -static void endio(struct bio *bio, int error) +static void endio(struct bio *bio) { struct io *io; unsigned region; - if (error && bio_data_dir(bio) == READ) + if (bio->bi_error && bio_data_dir(bio) == READ) zero_fill_bio(bio); /* @@ -149,7 +149,7 @@ static void endio(struct bio *bio, int error) bio_put(bio); - dec_count(io, region, error); + dec_count(io, region, bio->bi_error); } /*----------------------------------------------------------------- diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index ad1b049ae2ab..e9d17488d5e3 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -146,16 +146,16 @@ static void put_io_block(struct log_writes_c *lc) } } -static void log_end_io(struct bio *bio, int err) +static void log_end_io(struct bio *bio) { struct log_writes_c *lc = bio->bi_private; struct bio_vec *bvec; int i; - if (err) { + if (bio->bi_error) { unsigned long flags; - DMERR("Error writing log block, error=%d", err); + DMERR("Error writing log block, error=%d", bio->bi_error); spin_lock_irqsave(&lc->blocks_lock, flags); lc->logging_enabled = false; spin_unlock_irqrestore(&lc->blocks_lock, flags); @@ -205,7 +205,6 @@ static int write_metadata(struct log_writes_c *lc, void *entry, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; - set_bit(BIO_UPTODATE, &bio->bi_flags); page = alloc_page(GFP_KERNEL); if (!page) { @@ -270,7 +269,6 @@ static int log_one_block(struct log_writes_c *lc, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private 
= lc; - set_bit(BIO_UPTODATE, &bio->bi_flags); for (i = 0; i < block->vec_cnt; i++) { /* @@ -292,7 +290,6 @@ static int log_one_block(struct log_writes_c *lc, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; - set_bit(BIO_UPTODATE, &bio->bi_flags); ret = bio_add_page(bio, block->vecs[i].bv_page, block->vecs[i].bv_len, 0); @@ -606,7 +603,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) WARN_ON(flush_bio || fua_bio); if (lc->device_supports_discard) goto map_bio; - bio_endio(bio, 0); + bio_endio(bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index d83696bf403b..e1eabfb2f52d 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -490,9 +490,11 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio) * If device is suspended, complete the bio. */ if (dm_noflush_suspending(ms->ti)) - bio_endio(bio, DM_ENDIO_REQUEUE); + bio->bi_error = DM_ENDIO_REQUEUE; else - bio_endio(bio, -EIO); + bio->bi_error = -EIO; + + bio_endio(bio); return; } @@ -515,7 +517,7 @@ static void read_callback(unsigned long error, void *context) bio_set_m(bio, NULL); if (likely(!error)) { - bio_endio(bio, 0); + bio_endio(bio); return; } @@ -531,7 +533,7 @@ static void read_callback(unsigned long error, void *context) DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.", m->dev->name); - bio_endio(bio, -EIO); + bio_io_error(bio); } /* Asynchronous read. */ @@ -580,7 +582,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) if (likely(m)) read_async_bio(m, bio); else - bio_endio(bio, -EIO); + bio_io_error(bio); } } @@ -598,7 +600,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) static void write_callback(unsigned long error, void *context) { - unsigned i, ret = 0; + unsigned i; struct bio *bio = (struct bio *) context; struct mirror_set *ms; int should_wake = 0; @@ -614,7 +616,7 @@ static void write_callback(unsigned long error, void *context) * regions with the same code. */ if (likely(!error)) { - bio_endio(bio, ret); + bio_endio(bio); return; } @@ -623,7 +625,8 @@ static void write_callback(unsigned long error, void *context) * degrade the array. */ if (bio->bi_rw & REQ_DISCARD) { - bio_endio(bio, -EOPNOTSUPP); + bio->bi_error = -EOPNOTSUPP; + bio_endio(bio); return; } @@ -828,13 +831,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) * be wrong if the failed leg returned after reboot and * got replicated back to the good legs.) */ - if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure))) - bio_endio(bio, -EIO); + bio_io_error(bio); else if (errors_handled(ms) && !keep_log(ms)) hold_bio(ms, bio); else - bio_endio(bio, 0); + bio_endio(bio); } } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 7c82d3ccce87..dd8ca0bb0980 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1490,7 +1490,7 @@ out: error_bios(snapshot_bios); } else { if (full_bio) - bio_endio(full_bio, 0); + bio_endio(full_bio); flush_bios(snapshot_bios); } @@ -1580,11 +1580,11 @@ static void start_copy(struct dm_snap_pending_exception *pe) dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); } -static void full_bio_end_io(struct bio *bio, int error) +static void full_bio_end_io(struct bio *bio) { void *callback_data = bio->bi_private; - dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0); + dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 
1 : 0); } static void start_full_bio(struct dm_snap_pending_exception *pe, diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index a672a1502c14..4f94c7da82f6 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -273,7 +273,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio, return DM_MAPIO_REMAPPED; } else { /* The range doesn't map to the target stripe */ - bio_endio(bio, 0); + bio_endio(bio); return DM_MAPIO_SUBMITTED; } } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index c33f61a4cc28..2ade2c46dca9 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -614,8 +614,10 @@ static void error_bio_list(struct bio_list *bios, int error) { struct bio *bio; - while ((bio = bio_list_pop(bios))) - bio_endio(bio, error); + while ((bio = bio_list_pop(bios))) { + bio->bi_error = error; + bio_endio(bio); + } } static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error) @@ -864,14 +866,14 @@ static void copy_complete(int read_err, unsigned long write_err, void *context) complete_mapping_preparation(m); } -static void overwrite_endio(struct bio *bio, int err) +static void overwrite_endio(struct bio *bio) { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); struct dm_thin_new_mapping *m = h->overwrite_mapping; bio->bi_end_io = m->saved_bi_end_io; - m->err = err; + m->err = bio->bi_error; complete_mapping_preparation(m); } @@ -996,7 +998,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) */ if (bio) { inc_remap_and_issue_cell(tc, m->cell, m->data_block); - bio_endio(bio, 0); + bio_endio(bio); } else { inc_all_io_entry(tc->pool, m->cell->holder); remap_and_issue(tc, m->cell->holder, m->data_block); @@ -1026,7 +1028,7 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m) static void process_prepared_discard_success(struct dm_thin_new_mapping *m) { - bio_endio(m->bio, 0); + bio_endio(m->bio); free_discard_mapping(m); } @@ -1040,7 +1042,7 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m) metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); bio_io_error(m->bio); } else - bio_endio(m->bio, 0); + bio_endio(m->bio); cell_defer_no_holder(tc, m->cell); mempool_free(m, tc->pool->mapping_pool); @@ -1111,7 +1113,8 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m) * Even if r is set, there could be sub discards in flight that we * need to wait for. */ - bio_endio(m->bio, r); + m->bio->bi_error = r; + bio_endio(m->bio); cell_defer_no_holder(tc, m->cell); mempool_free(m, pool->mapping_pool); } @@ -1487,9 +1490,10 @@ static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) { int error = should_error_unserviceable_bio(pool); - if (error) - bio_endio(bio, error); - else + if (error) { + bio->bi_error = error; + bio_endio(bio); + } else retry_on_resume(bio); } @@ -1625,7 +1629,7 @@ static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_priso * will prevent completion until the sub range discards have * completed. */ - bio_endio(bio, 0); + bio_endio(bio); } static void process_discard_bio(struct thin_c *tc, struct bio *bio) @@ -1639,7 +1643,7 @@ static void process_discard_bio(struct thin_c *tc, struct bio *bio) /* * The discard covers less than a block. 
*/ - bio_endio(bio, 0); + bio_endio(bio); return; } @@ -1784,7 +1788,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block if (bio_data_dir(bio) == READ) { zero_fill_bio(bio); cell_defer_no_holder(tc, cell); - bio_endio(bio, 0); + bio_endio(bio); return; } @@ -1849,7 +1853,7 @@ static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) } else { zero_fill_bio(bio); - bio_endio(bio, 0); + bio_endio(bio); } } else provision_block(tc, bio, block, cell); @@ -1920,7 +1924,7 @@ static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, } zero_fill_bio(bio); - bio_endio(bio, 0); + bio_endio(bio); break; default: @@ -1945,7 +1949,7 @@ static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell static void process_bio_success(struct thin_c *tc, struct bio *bio) { - bio_endio(bio, 0); + bio_endio(bio); } static void process_bio_fail(struct thin_c *tc, struct bio *bio) @@ -2581,7 +2585,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) thin_hook_bio(tc, bio); if (tc->requeue_mode) { - bio_endio(bio, DM_ENDIO_REQUEUE); + bio->bi_error = DM_ENDIO_REQUEUE; + bio_endio(bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index bb9c6a00e4b0..4b34df8fdb58 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -458,8 +458,9 @@ static void verity_finish_io(struct dm_verity_io *io, int error) bio->bi_end_io = io->orig_bi_end_io; bio->bi_private = io->orig_bi_private; + bio->bi_error = error; - bio_endio(bio, error); + bio_endio(bio); } static void verity_work(struct work_struct *w) @@ -469,12 +470,12 @@ static void verity_work(struct work_struct *w) verity_finish_io(io, verity_verify_io(io)); } -static void verity_end_io(struct bio *bio, int error) +static void verity_end_io(struct bio *bio) { struct dm_verity_io *io = bio->bi_private; - if (error) { - verity_finish_io(io, error); + if (bio->bi_error) { + verity_finish_io(io, bio->bi_error); return; } diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index b9a64bbce304..766bc93006e6 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c @@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio) break; } - bio_endio(bio, 0); + bio_endio(bio); /* accepted bio, don't make new request */ return DM_MAPIO_SUBMITTED; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f331d888e7f5..7f367fcace03 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -944,7 +944,8 @@ static void dec_pending(struct dm_io *io, int error) } else { /* done with normal IO or empty flush */ trace_block_bio_complete(md->queue, bio, io_error); - bio_endio(bio, io_error); + bio->bi_error = io_error; + bio_endio(bio); } } } @@ -957,17 +958,15 @@ static void disable_write_same(struct mapped_device *md) limits->max_write_same_sectors = 0; } -static void clone_endio(struct bio *bio, int error) +static void clone_endio(struct bio *bio) { + int error = bio->bi_error; int r = error; struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; - if (!bio_flagged(bio, BIO_UPTODATE) && !error) - error = -EIO; - if (endio) { r = endio(tio->ti, bio, error); if (r < 0 || r == DM_ENDIO_REQUEUE) @@ -996,7 +995,7 @@ static void clone_endio(struct bio *bio, int error) /* * Partial completion handling for request-based dm */ -static void end_clone_bio(struct bio *clone, int error) +static void 
end_clone_bio(struct bio *clone) { struct dm_rq_clone_bio_info *info = container_of(clone, struct dm_rq_clone_bio_info, clone); @@ -1013,13 +1012,13 @@ static void end_clone_bio(struct bio *clone, int error) * the remainder. */ return; - else if (error) { + else if (bio->bi_error) { /* * Don't notice the error to the upper layer yet. * The error handling decision is made by the target driver, * when the request is completed. */ - tio->error = error; + tio->error = bio->bi_error; return; } diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 1277eb26b58a..4a8e15058e8b 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -70,7 +70,7 @@ #include <linux/seq_file.h> -static void faulty_fail(struct bio *bio, int error) +static void faulty_fail(struct bio *bio) { struct bio *b = bio->bi_private; @@ -181,7 +181,7 @@ static void make_request(struct mddev *mddev, struct bio *bio) /* special case - don't decrement, don't generic_make_request, * just fail immediately */ - bio_endio(bio, -EIO); + bio_io_error(bio); return; } diff --git a/drivers/md/linear.c b/drivers/md/linear.c index fa7d577f3d12..aefd66142eef 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -297,7 +297,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) if (unlikely((split->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { /* Just ignore it */ - bio_endio(split, 0); + bio_endio(split); } else generic_make_request(split); } while (split != bio); diff --git a/drivers/md/md.c b/drivers/md/md.c index d429c30cd514..ac4381a6625c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -263,7 +263,9 @@ static void md_make_request(struct request_queue *q, struct bio *bio) return; } if (mddev->ro == 1 && unlikely(rw == WRITE)) { - bio_endio(bio, bio_sectors(bio) == 0 ? 
0 : -EROFS); + if (bio_sectors(bio) != 0) + bio->bi_error = -EROFS; + bio_endio(bio); return; } smp_rmb(); /* Ensure implications of 'active' are visible */ @@ -377,7 +379,7 @@ static int md_mergeable_bvec(struct request_queue *q, * Generic flush handling for md */ -static void md_end_flush(struct bio *bio, int err) +static void md_end_flush(struct bio *bio) { struct md_rdev *rdev = bio->bi_private; struct mddev *mddev = rdev->mddev; @@ -433,7 +435,7 @@ static void md_submit_flush_data(struct work_struct *ws) if (bio->bi_iter.bi_size == 0) /* an empty barrier - all done */ - bio_endio(bio, 0); + bio_endio(bio); else { bio->bi_rw &= ~REQ_FLUSH; mddev->pers->make_request(mddev, bio); @@ -728,15 +730,13 @@ void md_rdev_clear(struct md_rdev *rdev) } EXPORT_SYMBOL_GPL(md_rdev_clear); -static void super_written(struct bio *bio, int error) +static void super_written(struct bio *bio) { struct md_rdev *rdev = bio->bi_private; struct mddev *mddev = rdev->mddev; - if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { - printk("md: super_written gets error=%d, uptodate=%d\n", - error, test_bit(BIO_UPTODATE, &bio->bi_flags)); - WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags)); + if (bio->bi_error) { + printk("md: super_written gets error=%d\n", bio->bi_error); md_error(mddev, rdev); } @@ -791,7 +791,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, bio_add_page(bio, page, size, 0); submit_bio_wait(rw, bio); - ret = test_bit(BIO_UPTODATE, &bio->bi_flags); + ret = !bio->bi_error; bio_put(bio); return ret; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index ac3ede2bd00e..082a489af9d3 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -77,18 +77,18 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) struct bio *bio = mp_bh->master_bio; struct mpconf *conf = mp_bh->mddev->private; - bio_endio(bio, err); + bio->bi_error = err; + bio_endio(bio); mempool_free(mp_bh, conf->pool); } -static void multipath_end_request(struct bio *bio, int error) +static void multipath_end_request(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct multipath_bh *mp_bh = bio->bi_private; struct mpconf *conf = mp_bh->mddev->private; struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; - if (uptodate) + if (!bio->bi_error) multipath_end_bh_io(mp_bh, 0); else if (!(bio->bi_rw & REQ_RAHEAD)) { /* @@ -101,7 +101,7 @@ static void multipath_end_request(struct bio *bio, int error) (unsigned long long)bio->bi_iter.bi_sector); multipath_reschedule_retry(mp_bh); } else - multipath_end_bh_io(mp_bh, error); + multipath_end_bh_io(mp_bh, bio->bi_error); rdev_dec_pending(rdev, conf->mddev); } @@ -123,7 +123,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) mp_bh->path = multipath_map(conf); if (mp_bh->path < 0) { - bio_endio(bio, -EIO); + bio_io_error(bio); mempool_free(mp_bh, conf->pool); return; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index efb654eb5399..e6e0ae56f66b 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -543,7 +543,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) if (unlikely((split->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { /* Just ignore it */ - bio_endio(split, 0); + bio_endio(split); } else generic_make_request(split); } while (split != bio); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f80f1af61ce7..9aa7d1fb2bc1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -255,9 +255,10 @@ 
static void call_bio_endio(struct r1bio *r1_bio) done = 1; if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; + if (done) { - bio_endio(bio, 0); + bio_endio(bio); /* * Wake up any possible resync thread that waits for the device * to go idle. @@ -312,9 +313,9 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) return mirror; } -static void raid1_end_read_request(struct bio *bio, int error) +static void raid1_end_read_request(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + int uptodate = !bio->bi_error; struct r1bio *r1_bio = bio->bi_private; int mirror; struct r1conf *conf = r1_bio->mddev->private; @@ -397,9 +398,8 @@ static void r1_bio_write_done(struct r1bio *r1_bio) } } -static void raid1_end_write_request(struct bio *bio, int error) +static void raid1_end_write_request(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct r1bio *r1_bio = bio->bi_private; int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); struct r1conf *conf = r1_bio->mddev->private; @@ -410,7 +410,7 @@ static void raid1_end_write_request(struct bio *bio, int error) /* * 'one mirror IO has finished' event handler: */ - if (!uptodate) { + if (bio->bi_error) { set_bit(WriteErrorSeen, &conf->mirrors[mirror].rdev->flags); if (!test_and_set_bit(WantReplacement, @@ -793,7 +793,7 @@ static void flush_pending_writes(struct r1conf *conf) if (unlikely((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ - bio_endio(bio, 0); + bio_endio(bio); else generic_make_request(bio); bio = next; @@ -1068,7 +1068,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) if (unlikely((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ - bio_endio(bio, 0); + bio_endio(bio); else generic_make_request(bio); bio = next; @@ -1734,7 +1734,7 @@ abort: return err; } -static void end_sync_read(struct bio *bio, int error) +static void end_sync_read(struct bio *bio) { struct r1bio *r1_bio = bio->bi_private; @@ -1745,16 +1745,16 @@ static void end_sync_read(struct bio *bio, int error) * or re-read if the read failed. 
* We don't do much here, just schedule handling by raid1d */ - if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (!bio->bi_error) set_bit(R1BIO_Uptodate, &r1_bio->state); if (atomic_dec_and_test(&r1_bio->remaining)) reschedule_retry(r1_bio); } -static void end_sync_write(struct bio *bio, int error) +static void end_sync_write(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + int uptodate = !bio->bi_error; struct r1bio *r1_bio = bio->bi_private; struct mddev *mddev = r1_bio->mddev; struct r1conf *conf = mddev->private; @@ -1941,7 +1941,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) idx ++; } set_bit(R1BIO_Uptodate, &r1_bio->state); - set_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = 0; return 1; } @@ -1965,15 +1965,14 @@ static void process_checks(struct r1bio *r1_bio) for (i = 0; i < conf->raid_disks * 2; i++) { int j; int size; - int uptodate; + int error; struct bio *b = r1_bio->bios[i]; if (b->bi_end_io != end_sync_read) continue; - /* fixup the bio for reuse, but preserve BIO_UPTODATE */ - uptodate = test_bit(BIO_UPTODATE, &b->bi_flags); + /* fixup the bio for reuse, but preserve errno */ + error = b->bi_error; bio_reset(b); - if (!uptodate) - clear_bit(BIO_UPTODATE, &b->bi_flags); + b->bi_error = error; b->bi_vcnt = vcnt; b->bi_iter.bi_size = r1_bio->sectors << 9; b->bi_iter.bi_sector = r1_bio->sector + @@ -1996,7 +1995,7 @@ static void process_checks(struct r1bio *r1_bio) } for (primary = 0; primary < conf->raid_disks * 2; primary++) if (r1_bio->bios[primary]->bi_end_io == end_sync_read && - test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { + !r1_bio->bios[primary]->bi_error) { r1_bio->bios[primary]->bi_end_io = NULL; rdev_dec_pending(conf->mirrors[primary].rdev, mddev); break; @@ -2006,14 +2005,14 @@ static void process_checks(struct r1bio *r1_bio) int j; struct bio *pbio = r1_bio->bios[primary]; struct bio *sbio = r1_bio->bios[i]; - int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags); + int error = sbio->bi_error; if (sbio->bi_end_io != end_sync_read) continue; - /* Now we can 'fixup' the BIO_UPTODATE flag */ - set_bit(BIO_UPTODATE, &sbio->bi_flags); + /* Now we can 'fixup' the error value */ + sbio->bi_error = 0; - if (uptodate) { + if (!error) { for (j = vcnt; j-- ; ) { struct page *p, *s; p = pbio->bi_io_vec[j].bv_page; @@ -2028,7 +2027,7 @@ static void process_checks(struct r1bio *r1_bio) if (j >= 0) atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) - && uptodate)) { + && !error)) { /* No need to write to this device. 
*/ sbio->bi_end_io = NULL; rdev_dec_pending(conf->mirrors[i].rdev, mddev); @@ -2269,11 +2268,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio struct bio *bio = r1_bio->bios[m]; if (bio->bi_end_io == NULL) continue; - if (test_bit(BIO_UPTODATE, &bio->bi_flags) && + if (!bio->bi_error && test_bit(R1BIO_MadeGood, &r1_bio->state)) { rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); } - if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && + if (bio->bi_error && test_bit(R1BIO_WriteError, &r1_bio->state)) { if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) md_error(conf->mddev, rdev); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 940f2f365461..929e9a26d81b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -101,7 +101,7 @@ static int _enough(struct r10conf *conf, int previous, int ignore); static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped); static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); -static void end_reshape_write(struct bio *bio, int error); +static void end_reshape_write(struct bio *bio); static void end_reshape(struct r10conf *conf); static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) @@ -307,9 +307,9 @@ static void raid_end_bio_io(struct r10bio *r10_bio) } else done = 1; if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; if (done) { - bio_endio(bio, 0); + bio_endio(bio); /* * Wake up any possible resync thread that waits for the device * to go idle. @@ -358,9 +358,9 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, return r10_bio->devs[slot].devnum; } -static void raid10_end_read_request(struct bio *bio, int error) +static void raid10_end_read_request(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + int uptodate = !bio->bi_error; struct r10bio *r10_bio = bio->bi_private; int slot, dev; struct md_rdev *rdev; @@ -438,9 +438,8 @@ static void one_write_done(struct r10bio *r10_bio) } } -static void raid10_end_write_request(struct bio *bio, int error) +static void raid10_end_write_request(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct r10bio *r10_bio = bio->bi_private; int dev; int dec_rdev = 1; @@ -460,7 +459,7 @@ static void raid10_end_write_request(struct bio *bio, int error) /* * this branch is our 'one mirror IO has finished' event handler: */ - if (!uptodate) { + if (bio->bi_error) { if (repl) /* Never record new bad blocks to replacement, * just fail it. 
@@ -957,7 +956,7 @@ static void flush_pending_writes(struct r10conf *conf) if (unlikely((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ - bio_endio(bio, 0); + bio_endio(bio); else generic_make_request(bio); bio = next; @@ -1133,7 +1132,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) if (unlikely((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ - bio_endio(bio, 0); + bio_endio(bio); else generic_make_request(bio); bio = next; @@ -1916,7 +1915,7 @@ abort: return err; } -static void end_sync_read(struct bio *bio, int error) +static void end_sync_read(struct bio *bio) { struct r10bio *r10_bio = bio->bi_private; struct r10conf *conf = r10_bio->mddev->private; @@ -1928,7 +1927,7 @@ static void end_sync_read(struct bio *bio, int error) } else d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); - if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (!bio->bi_error) set_bit(R10BIO_Uptodate, &r10_bio->state); else /* The write handler will notice the lack of @@ -1977,9 +1976,8 @@ static void end_sync_request(struct r10bio *r10_bio) } } -static void end_sync_write(struct bio *bio, int error) +static void end_sync_write(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct r10bio *r10_bio = bio->bi_private; struct mddev *mddev = r10_bio->mddev; struct r10conf *conf = mddev->private; @@ -1996,7 +1994,7 @@ static void end_sync_write(struct bio *bio, int error) else rdev = conf->mirrors[d].rdev; - if (!uptodate) { + if (bio->bi_error) { if (repl) md_error(mddev, rdev); else { @@ -2044,7 +2042,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) /* find the first device with a block */ for (i=0; i<conf->copies; i++) - if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) + if (!r10_bio->devs[i].bio->bi_error) break; if (i == conf->copies) @@ -2064,7 +2062,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) continue; if (i == first) continue; - if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) { + if (!r10_bio->devs[i].bio->bi_error) { /* We know that the bi_io_vec layout is the same for * both 'first' and 'i', so we just compare them. 
* All vec entries are PAGE_SIZE; @@ -2706,8 +2704,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev = conf->mirrors[dev].rdev; if (r10_bio->devs[m].bio == NULL) continue; - if (test_bit(BIO_UPTODATE, - &r10_bio->devs[m].bio->bi_flags)) { + if (!r10_bio->devs[m].bio->bi_error) { rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, @@ -2722,8 +2719,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev = conf->mirrors[dev].replacement; if (r10_bio->devs[m].repl_bio == NULL) continue; - if (test_bit(BIO_UPTODATE, - &r10_bio->devs[m].repl_bio->bi_flags)) { + + if (!r10_bio->devs[m].repl_bio->bi_error) { rdev_clear_badblocks( rdev, r10_bio->devs[m].addr, @@ -2748,8 +2745,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) r10_bio->devs[m].addr, r10_bio->sectors, 0); rdev_dec_pending(rdev, conf->mddev); - } else if (bio != NULL && - !test_bit(BIO_UPTODATE, &bio->bi_flags)) { + } else if (bio != NULL && bio->bi_error) { if (!narrow_write_error(r10_bio, m)) { md_error(conf->mddev, rdev); set_bit(R10BIO_Degraded, @@ -3263,7 +3259,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, bio = r10_bio->devs[i].bio; bio_reset(bio); - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; if (conf->mirrors[d].rdev == NULL || test_bit(Faulty, &conf->mirrors[d].rdev->flags)) continue; @@ -3300,7 +3296,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, /* Need to set up for writing to the replacement */ bio = r10_bio->devs[i].repl_bio; bio_reset(bio); - clear_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = -EIO; sector = r10_bio->devs[i].addr; atomic_inc(&conf->mirrors[d].rdev->nr_pending); @@ -3377,7 +3373,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, if (bio->bi_end_io == end_sync_read) { md_sync_acct(bio->bi_bdev, nr_sectors); - set_bit(BIO_UPTODATE, &bio->bi_flags); + bio->bi_error = 0; generic_make_request(bio); } } @@ -4380,7 +4376,7 @@ read_more: read_bio->bi_end_io = end_sync_read; read_bio->bi_rw = READ; read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); - __set_bit(BIO_UPTODATE, &read_bio->bi_flags); + read_bio->bi_error = 0; read_bio->bi_vcnt = 0; read_bio->bi_iter.bi_size = 0; r10_bio->master_bio = read_bio; @@ -4601,9 +4597,8 @@ static int handle_reshape_read_error(struct mddev *mddev, return 0; } -static void end_reshape_write(struct bio *bio, int error) +static void end_reshape_write(struct bio *bio) { - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct r10bio *r10_bio = bio->bi_private; struct mddev *mddev = r10_bio->mddev; struct r10conf *conf = mddev->private; @@ -4620,7 +4615,7 @@ static void end_reshape_write(struct bio *bio, int error) rdev = conf->mirrors[d].rdev; } - if (!uptodate) { + if (bio->bi_error) { /* FIXME should record badblock */ md_error(mddev, rdev); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 59e44e99eef3..84d6eec1033e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -233,7 +233,7 @@ static void return_io(struct bio *return_bi) bi->bi_iter.bi_size = 0; trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), bi, 0); - bio_endio(bi, 0); + bio_endio(bi); bi = return_bi; } } @@ -887,9 +887,9 @@ static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) } static void -raid5_end_read_request(struct bio *bi, int error); +raid5_end_read_request(struct bio *bi); static void -raid5_end_write_request(struct bio *bi, int error); 
+raid5_end_write_request(struct bio *bi); static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) { @@ -2277,12 +2277,11 @@ static void shrink_stripes(struct r5conf *conf) conf->slab_cache = NULL; } -static void raid5_end_read_request(struct bio * bi, int error) +static void raid5_end_read_request(struct bio * bi) { struct stripe_head *sh = bi->bi_private; struct r5conf *conf = sh->raid_conf; int disks = sh->disks, i; - int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); char b[BDEVNAME_SIZE]; struct md_rdev *rdev = NULL; sector_t s; @@ -2291,9 +2290,9 @@ static void raid5_end_read_request(struct bio * bi, int error) if (bi == &sh->dev[i].req) break; - pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", + pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", (unsigned long long)sh->sector, i, atomic_read(&sh->count), - uptodate); + bi->bi_error); if (i == disks) { BUG(); return; @@ -2312,7 +2311,7 @@ static void raid5_end_read_request(struct bio * bi, int error) s = sh->sector + rdev->new_data_offset; else s = sh->sector + rdev->data_offset; - if (uptodate) { + if (!bi->bi_error) { set_bit(R5_UPTODATE, &sh->dev[i].flags); if (test_bit(R5_ReadError, &sh->dev[i].flags)) { /* Note that this cannot happen on a @@ -2400,13 +2399,12 @@ static void raid5_end_read_request(struct bio * bi, int error) release_stripe(sh); } -static void raid5_end_write_request(struct bio *bi, int error) +static void raid5_end_write_request(struct bio *bi) { struct stripe_head *sh = bi->bi_private; struct r5conf *conf = sh->raid_conf; int disks = sh->disks, i; struct md_rdev *uninitialized_var(rdev); - int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); sector_t first_bad; int bad_sectors; int replacement = 0; @@ -2429,23 +2427,23 @@ static void raid5_end_write_request(struct bio *bi, int error) break; } } - pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", + pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", (unsigned long long)sh->sector, i, atomic_read(&sh->count), - uptodate); + bi->bi_error); if (i == disks) { BUG(); return; } if (replacement) { - if (!uptodate) + if (bi->bi_error) md_error(conf->mddev, rdev); else if (is_badblock(rdev, sh->sector, STRIPE_SECTORS, &first_bad, &bad_sectors)) set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); } else { - if (!uptodate) { + if (bi->bi_error) { set_bit(STRIPE_DEGRADED, &sh->state); set_bit(WriteErrorSeen, &rdev->flags); set_bit(R5_WriteError, &sh->dev[i].flags); @@ -2466,7 +2464,7 @@ static void raid5_end_write_request(struct bio *bi, int error) } rdev_dec_pending(rdev, conf->mddev); - if (sh->batch_head && !uptodate && !replacement) + if (sh->batch_head && bi->bi_error && !replacement) set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) @@ -3107,7 +3105,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); - clear_bit(BIO_UPTODATE, &bi->bi_flags); + + bi->bi_error = -EIO; if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; @@ -3131,7 +3130,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); - clear_bit(BIO_UPTODATE, &bi->bi_flags); + + bi->bi_error = -EIO; if (!raid5_dec_bi_active_stripes(bi)) { 
md_write_end(conf->mddev); bi->bi_next = *return_bi; @@ -3156,7 +3156,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); - clear_bit(BIO_UPTODATE, &bi->bi_flags); + + bi->bi_error = -EIO; if (!raid5_dec_bi_active_stripes(bi)) { bi->bi_next = *return_bi; *return_bi = bi; @@ -4749,12 +4750,11 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf) * first). * If the read failed.. */ -static void raid5_align_endio(struct bio *bi, int error) +static void raid5_align_endio(struct bio *bi) { struct bio* raid_bi = bi->bi_private; struct mddev *mddev; struct r5conf *conf; - int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); struct md_rdev *rdev; bio_put(bi); @@ -4766,10 +4766,10 @@ static void raid5_align_endio(struct bio *bi, int error) rdev_dec_pending(rdev, conf->mddev); - if (!error && uptodate) { + if (!bi->bi_error) { trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), raid_bi, 0); - bio_endio(raid_bi, 0); + bio_endio(raid_bi); if (atomic_dec_and_test(&conf->active_aligned_reads)) wake_up(&conf->wait_for_quiescent); return; @@ -5133,7 +5133,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) remaining = raid5_dec_bi_active_stripes(bi); if (remaining == 0) { md_write_end(mddev); - bio_endio(bi, 0); + bio_endio(bi); } } @@ -5297,7 +5297,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) release_stripe_plug(mddev, sh); } else { /* cannot get stripe for read-ahead, just give-up */ - clear_bit(BIO_UPTODATE, &bi->bi_flags); + bi->bi_error = -EIO; break; } } @@ -5311,7 +5311,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), bi, 0); - bio_endio(bi, 0); + bio_endio(bi); } } @@ -5707,7 +5707,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) if (remaining == 0) { trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), raid_bio, 0); - bio_endio(raid_bio, 0); + bio_endio(raid_bio); } if (atomic_dec_and_test(&conf->active_aligned_reads)) wake_up(&conf->wait_for_quiescent); diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 4f97b248c236..0df77cb07df6 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -180,7 +180,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio) * another kernel subsystem, and we just pass it through. */ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { - err = -EIO; + bio->bi_error = -EIO; goto out; } @@ -199,6 +199,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio) "io error in %s sector %lld, len %d,\n", (rw == READ) ? "READ" : "WRITE", (unsigned long long) iter.bi_sector, len); + bio->bi_error = err; break; } } @@ -206,7 +207,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio) nd_iostat_end(bio, start); out: - bio_endio(bio, err); + bio_endio(bio); } static int nd_blk_rw_bytes(struct nd_namespace_common *ndns, diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 411c7b2bb37a..341202ed32b4 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1189,7 +1189,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio) * another kernel subsystem, and we just pass it through. 
*/ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { - err = -EIO; + bio->bi_error = -EIO; goto out; } @@ -1211,6 +1211,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio) "io error in %s sector %lld, len %d,\n", (rw == READ) ? "READ" : "WRITE", (unsigned long long) iter.bi_sector, len); + bio->bi_error = err; break; } } @@ -1218,7 +1219,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio) nd_iostat_end(bio, start); out: - bio_endio(bio, err); + bio_endio(bio); } static int btt_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index ade9eb917a4d..4c079d5cb539 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -77,7 +77,7 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio) if (bio_data_dir(bio)) wmb_pmem(); - bio_endio(bio, 0); + bio_endio(bio); } static int pmem_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index da212813f2d5..8bcb822b0bac 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -871,7 +871,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) } bytes_done += bvec.bv_len; } - bio_endio(bio, 0); + bio_endio(bio); return; fail: bio_io_error(bio); diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 7d4e9397ac31..93856b9b6214 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -220,8 +220,7 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio) index++; } } - set_bit(BIO_UPTODATE, &bio->bi_flags); - bio_endio(bio, 0); + bio_endio(bio); return; fail: bio_io_error(bio); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 6d88d24e6cce..5a9982f5d5d6 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -306,20 +306,13 @@ static void iblock_complete_cmd(struct se_cmd *cmd) kfree(ibr); } -static void iblock_bio_done(struct bio *bio, int err) +static void iblock_bio_done(struct bio *bio) { struct se_cmd *cmd = bio->bi_private; struct iblock_req *ibr = cmd->priv; - /* - * Set -EIO if !BIO_UPTODATE and the passed is still err=0 - */ - if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) - err = -EIO; - - if (err != 0) { - pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," - " err: %d\n", bio, err); + if (bio->bi_error) { + pr_err("bio error: %p, err: %d\n", bio, bio->bi_error); /* * Bump the ib_bio_err_cnt and release bio. 
*/ @@ -370,15 +363,15 @@ static void iblock_submit_bios(struct bio_list *list, int rw) blk_finish_plug(&plug); } -static void iblock_end_io_flush(struct bio *bio, int err) +static void iblock_end_io_flush(struct bio *bio) { struct se_cmd *cmd = bio->bi_private; - if (err) - pr_err("IBLOCK: cache flush failed: %d\n", err); + if (bio->bi_error) + pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error); if (cmd) { - if (err) + if (bio->bi_error) target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); else target_complete_cmd(cmd, SAM_STAT_GOOD); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 08e9084ee615..de18790eb21c 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -852,7 +852,7 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b) return bl; } -static void pscsi_bi_endio(struct bio *bio, int error) +static void pscsi_bi_endio(struct bio *bio) { bio_put(bio); } @@ -973,7 +973,7 @@ fail: while (*hbio) { bio = *hbio; *hbio = (*hbio)->bi_next; - bio_endio(bio, 0); /* XXX: should be error */ + bio_endio(bio); } return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } @@ -1061,7 +1061,7 @@ fail_free_bio: while (hbio) { struct bio *bio = hbio; hbio = hbio->bi_next; - bio_endio(bio, 0); /* XXX: should be error */ + bio_endio(bio); } ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; fail: |