author | Christoph Hellwig <hch@lst.de> | 2017-06-03 09:38:06 +0200
committer | Jens Axboe <axboe@fb.com> | 2017-06-09 17:27:32 +0200
commit | 4e4cbee93d56137ebff722be022cae5f70ef84fb (patch)
tree | 4fa7345155599fc6bdd653fca8c5224ddf90a5be /drivers/md
parent | blk-mq: switch ->queue_rq return value to blk_status_t (diff)
download | linux-4e4cbee93d56137ebff722be022cae5f70ef84fb.tar.xz, linux-4e4cbee93d56137ebff722be022cae5f70ef84fb.zip
block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/md')
33 files changed, 248 insertions(+), 238 deletions(-)
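For readers skimming the conversion below: the mechanical pattern is that bio->bi_error, a negative errno stored in an int, becomes bio->bi_status, a blk_status_t with its own BLK_STS_* code space, and integer error codes are translated only at the boundary via errno_to_blk_status()/blk_status_to_errno(), both of which appear in the hunks below. What follows is a minimal, self-contained userspace sketch of that translation, mirroring only the pairs visible in this diff (-EIO/BLK_STS_IOERR, -ENOMEM/BLK_STS_RESOURCE, -EILSEQ/BLK_STS_PROTECTION, -EOPNOTSUPP/BLK_STS_NOTSUPP, -ENOSPC/BLK_STS_NOSPC); the numeric enum values here are illustrative, not the kernel's actual encodings.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's blk_status_t codes; the real
 * definitions live in include/linux/blk_types.h and use other values. */
typedef unsigned char blk_status_t;
enum {
	BLK_STS_OK         = 0,
	BLK_STS_NOTSUPP    = 1,
	BLK_STS_PROTECTION = 2,
	BLK_STS_RESOURCE   = 3,
	BLK_STS_NOSPC      = 4,
	BLK_STS_IOERR      = 5,
};

/* Userspace model of errno_to_blk_status(): map the errnos seen in this
 * diff onto status codes, collapsing anything else to BLK_STS_IOERR. */
static blk_status_t errno_to_blk_status(int err)
{
	switch (err) {
	case 0:           return BLK_STS_OK;
	case -EOPNOTSUPP: return BLK_STS_NOTSUPP;
	case -EILSEQ:     return BLK_STS_PROTECTION;
	case -ENOMEM:     return BLK_STS_RESOURCE;
	case -ENOSPC:     return BLK_STS_NOSPC;
	default:          return BLK_STS_IOERR;
	}
}

/* Userspace model of blk_status_to_errno() for the same subset. */
static int blk_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:         return 0;
	case BLK_STS_NOTSUPP:    return -EOPNOTSUPP;
	case BLK_STS_PROTECTION: return -EILSEQ;
	case BLK_STS_RESOURCE:   return -ENOMEM;
	case BLK_STS_NOSPC:      return -ENOSPC;
	default:                 return -EIO;
	}
}

int main(void)
{
	int errnos[] = { 0, -EIO, -ENOMEM, -EILSEQ, -EOPNOTSUPP, -ENOSPC };

	for (unsigned i = 0; i < sizeof(errnos) / sizeof(errnos[0]); i++) {
		blk_status_t s = errno_to_blk_status(errnos[i]);
		printf("errno %d -> status %u -> errno %d\n",
		       errnos[i], (unsigned)s, blk_status_to_errno(s));
	}
	return 0;
}

Note how the mapping is lossy by design: anything unrecognized collapses to BLK_STS_IOERR, which is why several distinct errnos in the hunks below (for example -EROFS in md.c and -EINTR in bcache) all become BLK_STS_IOERR after the conversion.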
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c3ea03c9a1a8..dee542fff68e 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -849,10 +849,11 @@ static inline void wake_up_allocators(struct cache_set *c)
 /* Forward declarations */
-void bch_count_io_errors(struct cache *, int, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-			      int, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+			      blk_status_t, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
+		    const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 450d0e848ae4..866dcf78ff8e 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -307,7 +307,7 @@ static void bch_btree_node_read(struct btree *b)
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);
 	bch_bbio_free(bio, b->c);
@@ -374,10 +374,10 @@ static void btree_node_write_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct btree *b = container_of(cl, struct btree, io);
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);
-	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
+	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
 	closure_put(cl);
 }
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index db45a88c0ce9..6a9b85095e7b 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -50,7 +50,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 /* IO errors */
-void bch_count_io_errors(struct cache *ca, int error, const char *m)
+void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
 {
 	/*
 	 * The halflife of an error is:
@@ -103,7 +103,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
 }
 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
-			      int error, const char *m)
+			      blk_status_t error, const char *m)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct cache *ca = PTR_CACHE(c, &b->key, 0);
@@ -132,7 +132,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 }
 void bch_bbio_endio(struct cache_set *c, struct bio *bio,
-		    int error, const char *m)
+		    blk_status_t error, const char *m)
 {
 	struct closure *cl = bio->bi_private;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 1198e53d5670..0352d05e495c 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -549,7 +549,7 @@ static void journal_write_endio(struct bio *bio)
 {
 	struct journal_write *w = bio->bi_private;
-	cache_set_err_on(bio->bi_error, w->c, "journal io error");
+	cache_set_err_on(bio->bi_status, w->c, "journal io error");
 	closure_put(&w->c->journal.io);
 }
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 13b8a907006d..f633b30c962e 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -63,14 +63,14 @@ static void read_moving_endio(struct bio *bio)
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
-	if (bio->bi_error)
-		io->op.error = bio->bi_error;
+	if (bio->bi_status)
+		io->op.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(io->op.c, &b->key, 0)) {
-		io->op.error = -EINTR;
+		io->op.status = BLK_STS_IOERR;
 	}
-	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
+	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
 }
 static void moving_init(struct moving_io *io)
@@ -92,7 +92,7 @@ static void write_moving(struct closure *cl)
 	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct data_insert_op *op = &io->op;
-	if (!op->error) {
+	if (!op->status) {
 		moving_init(io);
 		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 709c9cc34369..019b3df9f1c6 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
 	if (ret == -ESRCH) {
 		op->replace_collision = true;
 	} else if (ret) {
-		op->error		= -ENOMEM;
+		op->status		= BLK_STS_RESOURCE;
 		op->insert_data_done	= true;
 	}
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = bio->bi_error;
+			op->status = bio->bi_status;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
-	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 static void bch_data_insert_start(struct closure *cl)
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio)
 	 * from the backing device.
 	 */
-	if (bio->bi_error)
-		s->iop.error = bio->bi_error;
+	if (bio->bi_status)
+		s->iop.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
-		s->iop.error = -EINTR;
+		s->iop.status = BLK_STS_IOERR;
 	}
-	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }
 /*
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = bio->bi_error;
+		s->iop.status = bio->bi_status;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s)
 				    &s->d->disk->part0, s->start_time);
 		trace_bcache_request_end(s->d, s->orig_bio);
-		s->orig_bio->bi_error = s->iop.error;
+		s->orig_bio->bi_status = s->iop.status;
 		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.inode		= d->id;
 	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->iop.write_prio	= 0;
-	s->iop.error		= 0;
+	s->iop.status		= 0;
 	s->iop.flags		= 0;
 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
 	s->iop.wq		= bcache_wq;
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl)
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
-		s->iop.error = 0;
+		s->iop.status = 0;
 		do_bio_hook(s, s->orig_bio);
 		/* XXX: invalidate cache */
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 				  !s->cache_miss, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
-	if (s->iop.error)
+	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 	else if (s->iop.bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 1ff36875c2b3..7689176951ce 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -10,7 +10,7 @@ struct data_insert_op {
 	unsigned		inode;
 	uint16_t		write_point;
 	uint16_t		write_prio;
-	short			error;
+	blk_status_t		status;
 	union {
 		uint16_t	flags;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e57353e39168..fbc4f5412dec 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -271,7 +271,7 @@ static void write_super_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
-	bch_count_io_errors(ca, bio->bi_error, "writing superblock");
+	bch_count_io_errors(ca, bio->bi_status, "writing superblock");
 	closure_put(&ca->set->sb_write);
 }
@@ -321,7 +321,7 @@ static void uuid_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
-	cache_set_err_on(bio->bi_error, c, "accessing uuids");
+	cache_set_err_on(bio->bi_status, c, "accessing uuids");
 	bch_bbio_free(bio, c);
 	closure_put(cl);
 }
@@ -494,7 +494,7 @@ static void prio_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
-	cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
+	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
 	bch_bbio_free(bio, ca->set);
 	closure_put(&ca->prio);
 }
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6ac2e48b9235..42c66e76f05e 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -167,7 +167,7 @@ static void dirty_endio(struct bio *bio)
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
-	if (bio->bi_error)
+	if (bio->bi_status)
 		SET_KEY_DIRTY(&w->key, false);
 	closure_put(&io->cl);
@@ -195,7 +195,7 @@ static void read_dirty_endio(struct bio *bio)
 	struct dirty_io *io = w->private;
 	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-			    bio->bi_error, "reading dirty data from cache");
+			    bio->bi_status, "reading dirty data from cache");
 	dirty_endio(bio);
 }
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index ae7da2c30a57..82d27384d31f 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -229,7 +229,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error)
+		   struct dm_bio_prison_cell *cell, blk_status_t error)
 {
 	struct bio_list bios;
 	struct bio *bio;
@@ -238,7 +238,7 @@ void dm_cell_error(struct dm_bio_prison *prison,
 	dm_cell_release(prison, cell, &bios);
 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = error;
+		bio->bi_status = error;
 		bio_endio(bio);
 	}
 }
diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h
index cddd4ac07e2c..cec52ac5e1ae 100644
--- a/drivers/md/dm-bio-prison-v1.h
+++ b/drivers/md/dm-bio-prison-v1.h
@@ -91,7 +91,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 			       struct dm_bio_prison_cell *cell,
 			       struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error);
+		   struct dm_bio_prison_cell *cell, blk_status_t error);
 /*
  * Visits the cell and then releases.  Guarantees no new inmates are
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cd8139593ccd..0902d2fd1743 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -145,8 +145,8 @@ struct dm_buffer {
 	enum data_mode data_mode;
 	unsigned char list_mode;		/* LIST_* */
 	unsigned hold_count;
-	int read_error;
-	int write_error;
+	blk_status_t read_error;
+	blk_status_t write_error;
 	unsigned long state;
 	unsigned long last_accessed;
 	struct dm_bufio_client *c;
@@ -555,7 +555,7 @@ static void dmio_complete(unsigned long error, void *context)
 {
 	struct dm_buffer *b = context;
-	b->bio.bi_error = error ? -EIO : 0;
+	b->bio.bi_status = error ? BLK_STS_IOERR : 0;
 	b->bio.bi_end_io(&b->bio);
 }
@@ -588,7 +588,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 	r = dm_io(&io_req, 1, &region, NULL);
 	if (r) {
-		b->bio.bi_error = r;
+		b->bio.bi_status = errno_to_blk_status(r);
 		end_io(&b->bio);
 	}
 }
@@ -596,7 +596,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 static void inline_endio(struct bio *bio)
 {
 	bio_end_io_t *end_fn = bio->bi_private;
-	int error = bio->bi_error;
+	blk_status_t status = bio->bi_status;
 	/*
 	 * Reset the bio to free any attached resources
@@ -604,7 +604,7 @@ static void inline_endio(struct bio *bio)
 	 */
 	bio_reset(bio);
-	bio->bi_error = error;
+	bio->bi_status = status;
 	end_fn(bio);
 }
@@ -685,11 +685,12 @@ static void write_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
-	b->write_error = bio->bi_error;
-	if (unlikely(bio->bi_error)) {
+	b->write_error = bio->bi_status;
+	if (unlikely(bio->bi_status)) {
 		struct dm_bufio_client *c = b->c;
-		int error = bio->bi_error;
-		(void)cmpxchg(&c->async_write_error, 0, error);
+
+		(void)cmpxchg(&c->async_write_error, 0,
+				blk_status_to_errno(bio->bi_status));
 	}
 	BUG_ON(!test_bit(B_WRITING, &b->state));
@@ -1063,7 +1064,7 @@ static void read_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
-	b->read_error = bio->bi_error;
+	b->read_error = bio->bi_status;
 	BUG_ON(!test_bit(B_READING, &b->state));
@@ -1107,7 +1108,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 	if (b->read_error) {
-		int error = b->read_error;
+		int error = blk_status_to_errno(b->read_error);
 		dm_bufio_release(b);
@@ -1257,7 +1258,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
 */
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
-	int a, f;
+	blk_status_t a;
+	int f;
 	unsigned long buffers_processed = 0;
 	struct dm_buffer *b, *tmp;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index c48612e6d525..c5ea03fc7ee1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
 */
 struct continuation {
 	struct work_struct ws;
-	int input;
+	blk_status_t input;
 };
 static inline void init_continuation(struct continuation *k,
@@ -145,7 +145,7 @@ struct batcher {
 	/*
	 * The operation that everyone is waiting for.
	 */
-	int (*commit_op)(void *context);
+	blk_status_t (*commit_op)(void *context);
 	void *commit_context;
 	/*
@@ -171,8 +171,7 @@ struct batcher {
 static void __commit(struct work_struct *_ws)
 {
 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
-
-	int r;
+	blk_status_t r;
 	unsigned long flags;
 	struct list_head work_items;
 	struct work_struct *ws, *tmp;
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws)
 	while ((bio = bio_list_pop(&bios))) {
 		if (r) {
-			bio->bi_error = r;
+			bio->bi_status = r;
 			bio_endio(bio);
 		} else
 			b->issue_op(bio, b->issue_context);
 	}
 }
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws)
 static void batcher_init(struct batcher *b,
-			 int (*commit_op)(void *),
+			 blk_status_t (*commit_op)(void *),
			 void *commit_context,
			 void (*issue_op)(struct bio *bio, void *),
			 void *issue_context,
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio)
 	dm_unhook_bio(&pb->hook_info, bio);
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio_endio(bio);
 		return;
 	}
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
 	if (read_err || write_err)
-		mg->k.input = -EIO;
+		mg->k.input = BLK_STS_IOERR;
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio)
 	dm_unhook_bio(&pb->hook_info, bio);
-	if (bio->bi_error)
-		mg->k.input = bio->bi_error;
+	if (bio->bi_status)
+		mg->k.input = bio->bi_status;
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
 	if (mg->overwrite_bio) {
 		if (success)
 			force_set_dirty(cache, cblock);
+		else if (mg->k.input)
+			mg->overwrite_bio->bi_status = mg->k.input;
 		else
-			mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO);
+			mg->overwrite_bio->bi_status = BLK_STS_IOERR;
 		bio_endio(mg->overwrite_bio);
 	} else {
 		if (success)
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws)
 		r = copy(mg, is_policy_promote);
 		if (r) {
 			DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
-			mg->k.input = -EIO;
+			mg->k.input = BLK_STS_IOERR;
 			mg_complete(mg, false);
 		}
 	}
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown)
 /*
 * Used by the batcher.
 */
-static int commit_op(void *context)
+static blk_status_t commit_op(void *context)
 {
	struct cache *cache = context;
	if (dm_cache_changed_this_transaction(cache->cmd))
-		return commit(cache, false);
+		return errno_to_blk_status(commit(cache, false));
	return 0;
 }
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache)
	bio_list_init(&cache->deferred_bios);
	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio->bi_status = BLK_STS_DM_REQUEUE;
		bio_endio(bio);
	}
 }
@@ -2820,7 +2821,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
	return r;
 }
-static int cache_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
	struct cache *cache = ti->private;
	unsigned long flags;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f4b51809db21..586cef085c6a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -71,7 +71,7 @@ struct dm_crypt_io {
	struct convert_context ctx;
	atomic_t io_pending;
-	int error;
+	blk_status_t error;
	sector_t sector;
	struct rb_node rb_node;
@@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static int crypt_convert(struct crypt_config *cc,
+static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
 {
	unsigned int tag_offset = 0;
@@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc,
		 */
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
		/*
		 * There was an error while processing the request.
		 */
		default:
			atomic_dec(&ctx->cc_pending);
-			return -EIO;
+			return BLK_STS_IOERR;
		}
	}
@@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 {
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
-	int error = io->error;
+	blk_status_t error = io->error;
	if (!atomic_dec_and_test(&io->io_pending))
		return;
@@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
	else
		kfree(io->integrity_metadata);
-	base_bio->bi_error = error;
+	base_bio->bi_status = error;
	bio_endio(base_bio);
 }
@@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone)
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
-	int error;
+	blk_status_t error;
	/*
	 * free the processed pages
@@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone)
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);
-	error = clone->bi_error;
+	error = clone->bi_status;
	bio_put(clone);
	if (rw == READ && !error) {
@@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work)
	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
-		io->error = -ENOMEM;
+		io->error = BLK_STS_RESOURCE;
	crypt_dec_pending(io);
 }
@@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
	sector_t sector;
	struct rb_node **rbp, *parent;
-	if (unlikely(io->error < 0)) {
+	if (unlikely(io->error)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
@@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
-	int r;
+	blk_status_t r;
	/*
	 * Prevent io from disappearing until this function completes.
@@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
		goto dec;
	}
@@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
@@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
	struct crypt_config *cc = io->cc;
-	int r = 0;
+	blk_status_t r;
	crypt_inc_pending(io);
@@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
			   io->sector);
	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
		io->error = r;
	if (atomic_dec_and_test(&io->ctx.cc_pending))
@@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
	if (error == -EBADMSG) {
		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
-		io->error = -EILSEQ;
+		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c9539917a59b..3d04d5ce19d9 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -358,7 +358,8 @@ map_bio:
	return DM_MAPIO_REMAPPED;
 }
-static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
@@ -377,7 +378,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error)
		 * Error read during the down_interval if drop_writes
		 * and error_writes were not configured.
		 */
-		*error = -EIO;
+		*error = BLK_STS_IOERR;
	}
 }
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ee78fb471229..ccc6ef4d00b9 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -246,7 +246,7 @@ struct dm_integrity_io {
	unsigned metadata_offset;
	atomic_t in_flight;
-	int bi_error;
+	blk_status_t bi_status;
	struct completion *completion;
@@ -1114,8 +1114,8 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *
 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
 {
	int r = dm_integrity_failed(ic);
-	if (unlikely(r) && !bio->bi_error)
-		bio->bi_error = r;
+	if (unlikely(r) && !bio->bi_status)
+		bio->bi_status = errno_to_blk_status(r);
	bio_endio(bio);
 }
@@ -1123,7 +1123,7 @@ static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *di
 {
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
-	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
+	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
@@ -1142,9 +1142,9 @@ static void dec_in_flight(struct dm_integrity_io *dio)
		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
-		if (unlikely(dio->bi_error) && !bio->bi_error)
-			bio->bi_error = dio->bi_error;
-		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
+		if (unlikely(dio->bi_status) && !bio->bi_status)
+			bio->bi_status = dio->bi_status;
+		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
@@ -1318,7 +1318,7 @@ skip_io:
	dec_in_flight(dio);
	return;
 error:
-	dio->bi_error = r;
+	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
 }
@@ -1331,7 +1331,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
	sector_t area, offset;
	dio->ic = ic;
-	dio->bi_error = 0;
+	dio->bi_status = 0;
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3702e502466d..c8f8f3004085 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -124,7 +124,7 @@ static void complete_io(struct io *io)
	fn(error_bits, context);
 }
-static void dec_count(struct io *io, unsigned int region, int error)
+static void dec_count(struct io *io, unsigned int region, blk_status_t error)
 {
	if (error)
		set_bit(region, &io->error_bits);
@@ -137,9 +137,9 @@ static void endio(struct bio *bio)
 {
	struct io *io;
	unsigned region;
-	int error;
+	blk_status_t error;
-	if (bio->bi_error && bio_data_dir(bio) == READ)
+	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);
	/*
@@ -147,7 +147,7 @@ static void endio(struct bio *bio)
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);
-	error = bio->bi_error;
+	error = bio->bi_status;
	bio_put(bio);
	dec_count(io, region, error);
@@ -319,7 +319,7 @@ static void do_region(int op, int op_flags, unsigned region,
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
-		dec_count(io, region, -EOPNOTSUPP);
+		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index cc57c7fa1268..a1da0eb58a93 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -150,10 +150,10 @@ static void log_end_io(struct bio *bio)
 {
	struct log_writes_c *lc = bio->bi_private;
-	if (bio->bi_error) {
+	if (bio->bi_status) {
		unsigned long flags;
-		DMERR("Error writing log block, error=%d", bio->bi_error);
+		DMERR("Error writing log block, error=%d", bio->bi_status);
		spin_lock_irqsave(&lc->blocks_lock, flags);
		lc->logging_enabled = false;
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -664,7 +664,8 @@ map_bio:
	return DM_MAPIO_REMAPPED;
 }
-static int normal_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int normal_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
	struct log_writes_c *lc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 39262e344ae1..a7d2e0840cc5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -565,7 +565,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;
-	bio->bi_error = 0;
+	bio->bi_status = 0;
	bio->bi_bdev = pgpath->path.dev->bdev;
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
@@ -623,10 +623,10 @@ static void process_queued_bios(struct work_struct *work)
		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
		switch (r) {
		case DM_MAPIO_KILL:
-			r = -EIO;
-			/*FALLTHRU*/
+			bio->bi_status = BLK_STS_IOERR;
+			bio_endio(bio);
		case DM_MAPIO_REQUEUE:
-			bio->bi_error = r;
+			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
@@ -1510,7 +1510,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
	return r;
 }
-static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *error)
+static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
+		blk_status_t *error)
 {
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
@@ -1518,7 +1519,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *er
	unsigned long flags;
	int r = DM_ENDIO_DONE;
-	if (!*error || noretry_error(errno_to_blk_status(*error)))
+	if (!*error || noretry_error(*error))
		goto done;
	if (pgpath)
@@ -1527,7 +1528,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *er
	if (atomic_read(&m->nr_valid_paths) == 0 &&
	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		dm_report_EIO(m);
-		*error = -EIO;
+		*error = BLK_STS_IOERR;
		goto done;
	}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 77bcf50ce75f..0822e4a6f67d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -490,9 +490,9 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
-			bio->bi_error = DM_ENDIO_REQUEUE;
+			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
@@ -626,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
-		bio->bi_error = -EOPNOTSUPP;
+		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}
@@ -1236,7 +1236,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
	return DM_MAPIO_REMAPPED;
 }
-static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1255,7 +1256,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
		return DM_ENDIO_DONE;
	}
-	if (*error == -EOPNOTSUPP)
+	if (*error == BLK_STS_NOTSUPP)
		return DM_ENDIO_DONE;
	if (bio->bi_opf & REQ_RAHEAD)
@@ -1277,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error)
			bd = &bio_record->details;
			dm_bio_restore(bd, bio);
-			bio->bi_error = 0;
+			bio->bi_status = 0;
			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 63402f8a38de..fafd5326e572 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone)
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
-	blk_status_t error = errno_to_blk_status(clone->bi_error);
+	blk_status_t error = clone->bi_status;
	bio_put(clone);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 79a845798e2f..1ba41048b438 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1590,7 +1590,7 @@ static void full_bio_end_io(struct bio *bio)
 {
	void *callback_data = bio->bi_private;
-	dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
+	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
 }
 static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1851,7 +1851,8 @@ out_unlock:
	return r;
 }
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
	struct dm_snapshot *s = ti->private;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 49888bc2c909..11621a0af887 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -375,7 +375,8 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
	}
 }
-static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
	unsigned i;
	char major_minor[16];
@@ -387,7 +388,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error)
	if (bio->bi_opf & REQ_RAHEAD)
		return DM_ENDIO_DONE;
-	if (*error == -EOPNOTSUPP)
+	if (*error == BLK_STS_NOTSUPP)
		return DM_ENDIO_DONE;
	memset(major_minor, 0, sizeof(major_minor));
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 22b1a64c44b7..3490b300cbff 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r)
	 * Even if r is set, there could be sub discards in flight that we
	 * need to wait for.
	 */
-	if (r && !op->parent_bio->bi_error)
-		op->parent_bio->bi_error = r;
+	if (r && !op->parent_bio->bi_status)
+		op->parent_bio->bi_status = errno_to_blk_status(r);
	bio_endio(op->parent_bio);
 }
@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool,
 }
 static void cell_error_with_code(struct pool *pool,
-		struct dm_bio_prison_cell *cell, int error_code)
+		struct dm_bio_prison_cell *cell, blk_status_t error_code)
 {
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
 }
-static int get_pool_io_error_code(struct pool *pool)
+static blk_status_t get_pool_io_error_code(struct pool *pool)
 {
-	return pool->out_of_data_space ? -ENOSPC : -EIO;
+	return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
 }
 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-	int error = get_pool_io_error_code(pool);
-
-	cell_error_with_code(pool, cell, error);
+	cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
 }
 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
+	cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
 }
 /*----------------------------------------------------------------*/
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
	bio_list_init(master);
 }
-static void error_bio_list(struct bio_list *bios, int error)
+static void error_bio_list(struct bio_list *bios, blk_status_t error)
 {
	struct bio *bio;
	while ((bio = bio_list_pop(bios))) {
-		bio->bi_error = error;
+		bio->bi_status = error;
		bio_endio(bio);
	}
 }
-static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
+static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
+		blk_status_t error)
 {
	struct bio_list bios;
	unsigned long flags;
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc)
	__merge_bio_list(&bios, &tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);
-	error_bio_list(&bios, DM_ENDIO_REQUEUE);
+	error_bio_list(&bios, BLK_STS_DM_REQUEUE);
	requeue_deferred_cells(tc);
 }
-static void error_retry_list_with_code(struct pool *pool, int error)
+static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 {
	struct thin_c *tc;
@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error)
 static void error_retry_list(struct pool *pool)
 {
-	int error = get_pool_io_error_code(pool);
-
-	error_retry_list_with_code(pool, error);
+	error_retry_list_with_code(pool, get_pool_io_error_code(pool));
 }
 /*
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping {
	 */
	atomic_t prepare_actions;
-	int err;
+	blk_status_t status;
	struct thin_c *tc;
	dm_block_t virt_begin, virt_end;
	dm_block_t data_block;
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
	struct dm_thin_new_mapping *m = context;
-	m->err = read_err || write_err ? -EIO : 0;
+	m->status = read_err || write_err ? BLK_STS_IOERR : 0;
	complete_mapping_preparation(m);
 }
@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio)
	bio->bi_end_io = m->saved_bi_end_io;
-	m->err = bio->bi_error;
+	m->status = bio->bi_status;
	complete_mapping_preparation(m);
 }
@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
	struct bio *bio = m->bio;
	int r;
-	if (m->err) {
+	if (m->status) {
		cell_error(pool, m->cell);
		goto out;
	}
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio)
	spin_unlock_irqrestore(&tc->lock, flags);
 }
-static int should_error_unserviceable_bio(struct pool *pool)
+static blk_status_t should_error_unserviceable_bio(struct pool *pool)
 {
	enum pool_mode m = get_pool_mode(pool);
@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool)
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-		return -EIO;
+		return BLK_STS_IOERR;
	case PM_OUT_OF_DATA_SPACE:
-		return pool->pf.error_if_no_space ? -ENOSPC : 0;
+		return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
	case PM_READ_ONLY:
	case PM_FAIL:
-		return -EIO;
+		return BLK_STS_IOERR;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-		return -EIO;
+		return BLK_STS_IOERR;
	}
 }
 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-	int error = should_error_unserviceable_bio(pool);
+	blk_status_t error = should_error_unserviceable_bio(pool);
	if (error) {
-		bio->bi_error = error;
+		bio->bi_status = error;
		bio_endio(bio);
	} else
		retry_on_resume(bio);
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
	struct bio *bio;
	struct bio_list bios;
-	int error;
+	blk_status_t error;
	error = should_error_unserviceable_bio(pool);
	if (error) {
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc)
	unsigned count = 0;
	if (tc->requeue_mode) {
-		error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
+		error_thin_bio_list(tc, &tc->deferred_bio_list,
+				BLK_STS_DM_REQUEUE);
		return;
	}
@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws)
	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
		pool->pf.error_if_no_space = true;
		notify_of_pool_mode_change_to_oods(pool);
-		error_retry_list_with_code(pool, -ENOSPC);
+		error_retry_list_with_code(pool, BLK_STS_NOSPC);
	}
 }
@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
	thin_hook_bio(tc, bio);
	if (tc->requeue_mode) {
-		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio->bi_status = BLK_STS_DM_REQUEUE;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio)
	return thin_bio_map(ti, bio);
 }
-static int thin_endio(struct dm_target *ti, struct bio *bio, int *err)
+static int thin_endio(struct dm_target *ti, struct bio *bio,
+		blk_status_t *err)
 {
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 9ed55468b98b..2dca66eb67e1 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -538,13 +538,13 @@ static int verity_verify_io(struct dm_verity_io *io)
 /*
  * End one "io" structure with a given error.
  */
-static void verity_finish_io(struct dm_verity_io *io, int error)
+static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
 {
	struct dm_verity *v = io->v;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
	bio->bi_end_io = io->orig_bi_end_io;
-	bio->bi_error = error;
+	bio->bi_status = status;
	verity_fec_finish_io(io);
@@ -555,15 +555,15 @@ static void verity_work(struct work_struct *w)
 {
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
-	verity_finish_io(io, verity_verify_io(io));
+	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 static void verity_end_io(struct bio *bio)
 {
	struct dm_verity_io *io = bio->bi_private;
-	if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
-		verity_finish_io(io, bio->bi_error);
+	if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+		verity_finish_io(io, bio->bi_status);
		return;
	}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7a7047211c64..f38f9dd5cbdd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue;
 */
 struct dm_io {
	struct mapped_device *md;
-	int error;
+	blk_status_t status;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
@@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md)
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
-static void dec_pending(struct dm_io *io, int error)
+static void dec_pending(struct dm_io *io, blk_status_t error)
 {
	unsigned long flags;
-	int io_error;
+	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
-		if (!(io->error > 0 && __noflush_suspending(md)))
-			io->error = error;
+		if (!(io->status == BLK_STS_DM_REQUEUE &&
+		      __noflush_suspending(md)))
+			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}
	if (atomic_dec_and_test(&io->io_count)) {
-		if (io->error == DM_ENDIO_REQUEUE) {
+		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
@@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error)
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
-				io->error = -EIO;
+				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}
-		io_error = io->error;
+		io_error = io->status;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);
-		if (io_error == DM_ENDIO_REQUEUE)
+		if (io_error == BLK_STS_DM_REQUEUE)
			return;
		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
@@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error)
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
-			bio->bi_error = io_error;
+			bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
@@ -838,14 +839,13 @@ void disable_write_zeroes(struct mapped_device *md)
 static void clone_endio(struct bio *bio)
 {
-	int error = bio->bi_error;
-	int r = error;
+	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
-	if (unlikely(error == -EREMOTEIO)) {
+	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
			disable_write_same(md);
@@ -855,10 +855,10 @@ static void clone_endio(struct bio *bio)
	}
	if (endio) {
-		r = endio(tio->ti, bio, &error);
+		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
-			error = DM_ENDIO_REQUEUE;
+			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
@@ -1094,11 +1094,11 @@ static void __map_bio(struct dm_target_io *tio)
		generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
-		r = -EIO;
-		/*FALLTHRU*/
+		dec_pending(tio->io, BLK_STS_IOERR);
+		free_tio(tio);
+		break;
	case DM_MAPIO_REQUEUE:
-		/* error the io and bail out, or requeue it if needed */
-		dec_pending(tio->io, r);
+		dec_pending(tio->io, BLK_STS_DM_REQUEUE);
		free_tio(tio);
		break;
	default:
@@ -1366,7 +1366,7 @@ static void __split_and_process_bio(struct mapped_device *md,
	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
-	ci.io->error = 0;
+	ci.io->status = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 10367ffe92e3..6452e83fd650 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -273,7 +273,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
-			bio->bi_error = -EROFS;
+			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}
@@ -719,8 +719,8 @@ static void super_written(struct bio *bio)
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;
-	if (bio->bi_error) {
-		pr_err("md: super_written gets error=%d\n", bio->bi_error);
+	if (bio->bi_status) {
+		pr_err("md: super_written gets error=%d\n", bio->bi_status);
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
@@ -801,7 +801,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
	submit_bio_wait(bio);
-	ret = !bio->bi_error;
+	ret = !bio->bi_status;
	bio_put(bio);
	return ret;
 }
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index e95d521d93e9..68d036e64041 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -73,12 +73,12 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
-static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
+static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
 {
	struct bio *bio = mp_bh->master_bio;
	struct mpconf *conf = mp_bh->mddev->private;
-	bio->bi_error = err;
+	bio->bi_status = status;
	bio_endio(bio);
	mempool_free(mp_bh, conf->pool);
 }
@@ -89,7 +89,7 @@ static void multipath_end_request(struct bio *bio)
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
-	if (!bio->bi_error)
+	if (!bio->bi_status)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_opf & REQ_RAHEAD)) {
		/*
@@ -102,7 +102,7 @@ static void multipath_end_request(struct bio *bio)
		       (unsigned long long)bio->bi_iter.bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
-		multipath_end_bh_io(mp_bh, bio->bi_error);
+		multipath_end_bh_io(mp_bh, bio->bi_status);
	rdev_dec_pending(rdev, conf->mddev);
 }
@@ -347,7 +347,7 @@ static void multipathd(struct md_thread *thread)
			pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
			       bdevname(bio->bi_bdev,b),
			       (unsigned long long)bio->bi_iter.bi_sector);
-			multipath_end_bh_io(mp_bh, -EIO);
+			multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
		} else {
			pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
			       bdevname(bio->bi_bdev,b),
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index af5056d56878..94b87c4d0f7b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -277,7 +277,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
	struct r1conf *conf = r1_bio->mddev->private;
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
	/*
@@ -335,7 +335,7 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 static void raid1_end_read_request(struct bio *bio)
 {
-	int uptodate = !bio->bi_error;
+	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = bio->bi_private;
	struct r1conf *conf = r1_bio->mddev->private;
	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
@@ -426,12 +426,12 @@ static void raid1_end_write_request(struct bio *bio)
	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
	bool discard_error;
-	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
	/*
	 * 'one mirror IO has finished' event handler:
	 */
-	if (bio->bi_error && !discard_error) {
+	if (bio->bi_status && !discard_error) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
@@ -802,7 +802,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
		bio->bi_next = NULL;
		bio->bi_bdev = rdev->bdev;
		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1856,7 +1856,7 @@ static void end_sync_read(struct bio *bio)
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
-	if (!bio->bi_error)
+	if (!bio->bi_status)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	if (atomic_dec_and_test(&r1_bio->remaining))
@@ -1865,7 +1865,7 @@ static void end_sync_read(struct bio *bio)
 static void end_sync_write(struct bio *bio)
 {
-	int uptodate = !bio->bi_error;
+	int uptodate = !bio->bi_status;
	struct r1bio *r1_bio = get_resync_r1bio(bio);
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
@@ -2058,7 +2058,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
		idx ++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
-	bio->bi_error = 0;
+	bio->bi_status = 0;
	return 1;
 }
@@ -2082,16 +2082,16 @@ static void process_checks(struct r1bio *r1_bio)
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		int size;
-		int error;
+		blk_status_t status;
		struct bio_vec *bi;
		struct bio *b = r1_bio->bios[i];
		struct resync_pages *rp = get_resync_pages(b);
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve errno */
-		error = b->bi_error;
+		status = b->bi_status;
		bio_reset(b);
-		b->bi_error = error;
+		b->bi_status = status;
		b->bi_vcnt = vcnt;
		b->bi_iter.bi_size = r1_bio->sectors << 9;
		b->bi_iter.bi_sector = r1_bio->sector +
@@ -2113,7 +2113,7 @@ static void process_checks(struct r1bio *r1_bio)
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
-		    !r1_bio->bios[primary]->bi_error) {
+		    !r1_bio->bios[primary]->bi_status) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
@@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio)
		int j;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
-		int error = sbio->bi_error;
+		blk_status_t status = sbio->bi_status;
		struct page **ppages = get_resync_pages(pbio)->pages;
		struct page **spages = get_resync_pages(sbio)->pages;
		struct bio_vec *bi;
@@ -2132,12 +2132,12 @@ static void process_checks(struct r1bio *r1_bio)
		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the error value */
-		sbio->bi_error = 0;
+		sbio->bi_status = 0;
		bio_for_each_segment_all(bi, sbio, j)
			page_len[j] = bi->bv_len;
-		if (!error) {
+		if (!status) {
			for (j = vcnt; j-- ; ) {
				if (memcmp(page_address(ppages[j]),
					   page_address(spages[j]),
@@ -2149,7 +2149,7 @@ static void process_checks(struct r1bio *r1_bio)
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && !error)) {
+			      && !status)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2400,11 +2400,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
-		if (!bio->bi_error &&
+		if (!bio->bi_status &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
-		if (bio->bi_error &&
+		if (bio->bi_status &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4343d7ff9916..89ad1cd29037 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
	struct r10conf *conf = r10_bio->mddev->private;
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
	/*
@@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 static void raid10_end_read_request(struct bio *bio)
 {
-	int uptodate = !bio->bi_error;
+	int uptodate = !bio->bi_status;
	struct r10bio *r10_bio = bio->bi_private;
	int slot, dev;
	struct md_rdev *rdev;
@@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio)
	struct bio *to_put = NULL;
	bool discard_error;
-	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio)
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
-	if (bio->bi_error && !discard_error) {
+	if (bio->bi_status && !discard_error) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
@@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf)
		bio->bi_next = NULL;
		bio->bi_bdev = rdev->bdev;
		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
		bio->bi_next = NULL;
		bio->bi_bdev = rdev->bdev;
		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
 {
	struct r10conf *conf = r10_bio->mddev->private;
-	if (!bio->bi_error)
+	if (!bio->bi_status)
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	else
		/* The write handler will notice the lack of
@@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio)
	else
		rdev = conf->mirrors[d].rdev;
-	if (bio->bi_error) {
+	if (bio->bi_status) {
		if (repl)
			md_error(mddev, rdev);
		else {
@@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
	/* find the first device with a block */
	for (i=0; i<conf->copies; i++)
-		if (!r10_bio->devs[i].bio->bi_error)
+		if (!r10_bio->devs[i].bio->bi_status)
			break;
	if (i == conf->copies)
@@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
		tpages = get_resync_pages(tbio)->pages;
		d = r10_bio->devs[i].devnum;
		rdev = conf->mirrors[d].rdev;
-		if (!r10_bio->devs[i].bio->bi_error) {
+		if (!r10_bio->devs[i].bio->bi_status) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE;
@@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
			rdev = conf->mirrors[dev].rdev;
			if (r10_bio->devs[m].bio == NULL)
				continue;
-			if (!r10_bio->devs[m].bio->bi_error) {
+			if (!r10_bio->devs[m].bio->bi_status) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
@@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
			if (r10_bio->devs[m].repl_bio == NULL)
				continue;
-			if (!r10_bio->devs[m].repl_bio->bi_error) {
+			if (!r10_bio->devs[m].repl_bio->bi_status) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
@@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
				rdev_dec_pending(rdev, conf->mddev);
-			} else if (bio != NULL && bio->bi_error) {
+			} else if (bio != NULL && bio->bi_status) {
				fail = true;
				if (!narrow_write_error(r10_bio, m)) {
					md_error(conf->mddev, rdev);
@@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
				bio = r10_bio->devs[i].bio;
-				bio->bi_error = -EIO;
+				bio->bi_status = BLK_STS_IOERR;
				rcu_read_lock();
				rdev = rcu_dereference(conf->mirrors[d].rdev);
				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
				/* Need to set up for writing to the replacement */
				bio = r10_bio->devs[i].repl_bio;
-				bio->bi_error = -EIO;
+				bio->bi_status = BLK_STS_IOERR;
				sector = r10_bio->devs[i].addr;
				bio->bi_next = biolist;
@@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
-			bio->bi_error = 0;
+			bio->bi_status = 0;
			generic_make_request(bio);
		}
	}
@@ -4394,7 +4394,7 @@ read_more:
	read_bio->bi_end_io = end_reshape_read;
	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
-	read_bio->bi_error = 0;
+	read_bio->bi_status = 0;
	read_bio->bi_vcnt = 0;
	read_bio->bi_iter.bi_size = 0;
	r10_bio->master_bio = read_bio;
@@ -4638,7 +4638,7 @@ static void end_reshape_write(struct bio *bio)
		rdev = conf->mirrors[d].rdev;
	}
-	if (bio->bi_error) {
+	if (bio->bi_status) {
		/* FIXME should record badblock */
		md_error(mddev, rdev);
	}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 4c00bc248287..3ed6a0d89db8 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -572,7 +572,7 @@ static void r5l_log_endio(struct bio *bio)
	struct r5l_log *log = io->log;
	unsigned long flags;
-	if (bio->bi_error)
+	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);
	bio_put(bio);
@@ -1247,7 +1247,7 @@ static void r5l_log_flush_endio(struct bio *bio)
	unsigned long flags;
	struct r5l_io_unit *io;
-	if (bio->bi_error)
+	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);
	spin_lock_irqsave(&log->io_list_lock, flags);
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 5d25bebf3328..09e04be34e5f 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -397,7 +397,7 @@ static void ppl_log_endio(struct bio *bio)
	pr_debug("%s: seq: %llu\n", __func__, io->seq);
-	if (bio->bi_error)
+	if (bio->bi_status)
		md_error(ppl_conf->mddev, log->rdev);
	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9c4f7659f8b1..e1bdc320f664 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2476,7 +2476,7 @@ static void raid5_end_read_request(struct bio * bi)
	pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
-		bi->bi_error);
+		bi->bi_status);
	if (i == disks) {
		bio_reset(bi);
		BUG();
@@ -2496,7 +2496,7 @@ static void raid5_end_read_request(struct bio * bi)
			s = sh->sector + rdev->new_data_offset;
		else
			s = sh->sector + rdev->data_offset;
-	if (!bi->bi_error) {
+	if (!bi->bi_status) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* Note that this cannot happen on a
@@ -2613,7 +2613,7 @@ static void raid5_end_write_request(struct bio *bi)
	}
	pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
-		bi->bi_error);
+		bi->bi_status);
	if (i == disks) {
		bio_reset(bi);
		BUG();
@@ -2621,14 +2621,14 @@ static void raid5_end_write_request(struct bio *bi)
	}
	if (replacement) {
-		if (bi->bi_error)
+		if (bi->bi_status)
			md_error(conf->mddev, rdev);
		else if (is_badblock(rdev, sh->sector,
				     STRIPE_SECTORS,
				     &first_bad, &bad_sectors))
			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
	} else {
-		if (bi->bi_error) {
+		if (bi->bi_status) {
			set_bit(STRIPE_DEGRADED, &sh->state);
			set_bit(WriteErrorSeen, &rdev->flags);
			set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2649,7 +2649,7 @@ static void raid5_end_write_request(struct bio *bi)
	}
	rdev_dec_pending(rdev, conf->mddev);
-	if (sh->batch_head && bi->bi_error && !replacement)
+	if (sh->batch_head && bi->bi_status && !replacement)
		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
	bio_reset(bi);
@@ -3381,7 +3381,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
-			bi->bi_error = -EIO;
+			bi->bi_status = BLK_STS_IOERR;
			md_write_end(conf->mddev);
			bio_endio(bi);
			bi = nextbi;
@@ -3403,7 +3403,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
-			bi->bi_error = -EIO;
+			bi->bi_status = BLK_STS_IOERR;
			md_write_end(conf->mddev);
			bio_endio(bi);
			bi = bi2;
@@ -3429,7 +3429,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
-				bi->bi_error = -EIO;
+				bi->bi_status = BLK_STS_IOERR;
				bio_endio(bi);
				bi = nextbi;
			}
@@ -5144,7 +5144,7 @@ static void raid5_align_endio(struct bio *bi)
	struct mddev *mddev;
	struct r5conf *conf;
	struct md_rdev *rdev;
-	int error = bi->bi_error;
+	blk_status_t error = bi->bi_status;
	bio_put(bi);
@@ -5721,7 +5721,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
			release_stripe_plug(mddev, sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
-			bi->bi_error = -EIO;
+			bi->bi_status = BLK_STS_IOERR;
			break;
		}
	}
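A closing note on the other recurring shape in this patch: completion handlers now read bio->bi_status directly and store a blk_status_t in per-I/O state rather than an int error field, with "first error wins" semantics preserved across cloned sub-bios. The sketch below models that shape in plain, self-contained userspace C; struct bio here is a stand-in with only the relevant members, and my_io/my_endio are hypothetical names for illustration, not identifiers from the patch.

#include <stdio.h>

typedef unsigned char blk_status_t;
#define BLK_STS_OK    0
#define BLK_STS_IOERR 5	/* illustrative value, not the kernel's */

/* Stand-in for struct bio with just the members this sketch needs. */
struct bio {
	blk_status_t bi_status;	/* was: int bi_error (negative errno) */
	void *bi_private;
};

/* Hypothetical per-I/O state, analogous to dm_io/dm_integrity_io above. */
struct my_io {
	blk_status_t status;	/* was: int error */
	int remaining;		/* outstanding cloned sub-bios */
};

/* After the conversion, an endio handler propagates bi_status directly
 * instead of copying a negative errno out of bi_error. */
static void my_endio(struct bio *bio)
{
	struct my_io *io = bio->bi_private;

	if (bio->bi_status && !io->status)
		io->status = bio->bi_status;	/* first error wins */
	if (--io->remaining == 0)
		printf("io complete, status %u\n", (unsigned)io->status);
}

int main(void)
{
	struct my_io io = { .status = BLK_STS_OK, .remaining = 2 };
	struct bio a = { .bi_status = BLK_STS_OK,    .bi_private = &io };
	struct bio b = { .bi_status = BLK_STS_IOERR, .bi_private = &io };

	my_endio(&a);
	my_endio(&b);	/* the failing clone decides the final status */
	return 0;
}

Because blk_status_t has no requeue errno, the sketch also hints at why DM's private DM_ENDIO_REQUEUE overload of bi_error had to become a first-class code (BLK_STS_DM_REQUEUE) in the hunks above.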