author     Jens Axboe <axboe@fb.com>    2016-08-05 23:35:16 +0200
committer  Jens Axboe <axboe@fb.com>    2016-08-07 22:41:02 +0200
commit     1eff9d322a444245c67515edb52bc0eb68374aa8 (patch)
tree       aed4c3bfdf94202b93b9b5ce74c6e247f4c3ab85 /block
parent     target: iblock_execute_sync_cache() should use bio_set_op_attrs() (diff)
block: rename bio bi_rw to bi_opf
Since commit 63a4cc24867d, bio->bi_rw contains flags in the lower portion and the op code in the higher portions. This means that old code that relies on manually setting bi_rw is most likely going to be broken. Instead of letting that brokenness linger, rename the member to force old and out-of-tree code to break at compile time instead of at runtime.

No intended functional changes in this commit.

Signed-off-by: Jens Axboe <axboe@fb.com>
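For context, a minimal sketch (not part of this patch) of how a caller is expected to set the op and flags after the rename: the op is encoded into the high bits and the flags into the low bits of bi_opf via bio_set_op_attrs(), and any further flags are OR'd into bi_opf rather than the old bi_rw. The bdev variable and the specific op/flag choices below are illustrative assumptions only:

#include <linux/bio.h>

	/* Sketch only: allocate a bio and encode op + flags into bi_opf. */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;                            /* assumed target block device */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);  /* op in high bits, flags in low bits */
	bio->bi_opf |= REQ_FUA;                         /* extra flags OR into bi_opf, not bi_rw */

	submit_bio(bio);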
Diffstat (limited to 'block')
-rw-r--r--   block/bio-integrity.c    2
-rw-r--r--   block/bio.c              6
-rw-r--r--   block/blk-core.c        26
-rw-r--r--   block/blk-merge.c        8
-rw-r--r--   block/blk-mq.c          10
-rw-r--r--   block/blk-throttle.c     8
-rw-r--r--   block/cfq-iosched.c      4
7 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f70cc3bdfd01..63f72f00c72e 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
bip->bip_bio = bio;
bio->bi_integrity = bip;
- bio->bi_rw |= REQ_INTEGRITY;
+ bio->bi_opf |= REQ_INTEGRITY;
return bip;
err:
diff --git a/block/bio.c b/block/bio.c
index 3f76a38a5e2d..f39477538fef 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
*/
bio->bi_bdev = bio_src->bi_bdev;
bio_set_flag(bio, BIO_CLONED);
- bio->bi_rw = bio_src->bi_rw;
+ bio->bi_opf = bio_src->bi_opf;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
@@ -663,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
if (!bio)
return NULL;
bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_rw = bio_src->bi_rw;
+ bio->bi_opf = bio_src->bi_opf;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
@@ -873,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
init_completion(&ret.event);
bio->bi_private = &ret;
bio->bi_end_io = submit_bio_wait_endio;
- bio->bi_rw |= REQ_SYNC;
+ bio->bi_opf |= REQ_SYNC;
submit_bio(bio);
wait_for_completion_io(&ret.event);
diff --git a/block/blk-core.c b/block/blk-core.c
index a687e9cc16c2..999442ec4601 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
* Flush requests do not use the elevator so skip initialization.
* This allows a request to share the flush and elevator data.
*/
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
return false;
return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
- const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_back_merge_fn(q, req, bio))
return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
- const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_front_merge_fn(q, req, bio))
return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
- req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
- if (bio->bi_rw & REQ_RAHEAD)
+ req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+ if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
- const bool sync = !!(bio->bi_rw & REQ_SYNC);
+ const bool sync = !!(bio->bi_opf & REQ_SYNC);
struct blk_plug *plug;
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
@@ -1728,7 +1728,7 @@ get_rq:
/*
* Add in META/PRIO flags, if set, before we get to the IO scheduler
*/
- rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+ rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
/*
* Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
printk(KERN_INFO "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b),
- bio->bi_rw,
+ bio->bi_opf,
(unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
}
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
- if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+ if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
- bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+ bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
if (!nr_sectors) {
err = 0;
goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
* one.
*/
for (bio = rq->bio; bio; bio = bio->bi_next) {
- if ((bio->bi_rw & ff) != ff)
+ if ((bio->bi_opf & ff) != ff)
break;
bytes += bio->bi_iter.bi_size;
}
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
/* mixed attributes always follow the first bio */
if (req->cmd_flags & REQ_MIXED_MERGE) {
req->cmd_flags &= ~REQ_FAILFAST_MASK;
- req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+ req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
}
/*
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 41cbd4878958..3eec75a9e91d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
if (split) {
/* there isn't chance to merge the splitted bio */
- split->bi_rw |= REQ_NOMERGE;
+ split->bi_opf |= REQ_NOMERGE;
bio_chain(split, *bio);
trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
* Distributes the attributs to each bio.
*/
for (bio = rq->bio; bio; bio = bio->bi_next) {
- WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
- (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
- bio->bi_rw |= ff;
+ WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
+ (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
+ bio->bi_opf |= ff;
}
rq->cmd_flags |= REQ_MIXED_MERGE;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6a63da101bc4..e931a0e8e73d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1234,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- if (rw_is_sync(bio_op(bio), bio->bi_rw))
+ if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;
trace_block_getrq(q, bio, op);
@@ -1302,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
*/
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
- const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_map_ctx data;
struct request *rq;
unsigned int request_count = 0;
@@ -1396,8 +1396,8 @@ done:
*/
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
- const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_plug *plug;
unsigned int request_count = 0;
struct blk_map_ctx data;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c5494e403239..f1aba26f4719 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -821,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
* second time when it eventually gets issued. Set it when a bio
* is being charged to a tg.
*/
- if (!(bio->bi_rw & REQ_THROTTLED))
- bio->bi_rw |= REQ_THROTTLED;
+ if (!(bio->bi_opf & REQ_THROTTLED))
+ bio->bi_opf |= REQ_THROTTLED;
}
/**
@@ -1399,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
WARN_ON_ONCE(!rcu_read_lock_held());
/* see throtl_charge_bio() */
- if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
+ if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
goto out;
spin_lock_irq(q->queue_lock);
@@ -1478,7 +1478,7 @@ out:
* being issued.
*/
if (!throttled)
- bio->bi_rw &= ~REQ_THROTTLED;
+ bio->bi_opf &= ~REQ_THROTTLED;
return throttled;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index acabba198de9..cc2f6dbd4303 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
*/
static inline bool cfq_bio_sync(struct bio *bio)
{
- return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+ return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
}
/*
@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
}
static void