author	Christoph Hellwig <hch@lst.de>	2016-10-28 16:48:16 +0200
committer	Jens Axboe <axboe@fb.com>	2016-10-28 16:48:16 +0200
commit	ef295ecf090d3e86e5b742fc6ab34f1122a43773 (patch)
tree	e52e8be789b5654dfc43f626c50261f80615c03f /block
parent	block: split out request-only flags into a new namespace (diff)
block: better op and flags encoding
Now that we don't need the common flags to overflow outside the range of a 32-bit type we can encode them the same way for both the bio and request fields. This in addition allows us to place the operation first (and make some room for more ops while we're at it) and to stop having to shift around the operation values.

In addition this allows passing around only one value in the block layer instead of two (and eventually also in the file systems, but we can do that later) and thus clean up a lot of code.

Last but not least this allows decreasing the size of the cmd_flags field in struct request to 32-bits. Various functions passing this value could also be updated, but I'd like to avoid the churn for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
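For illustration, here is a minimal standalone sketch of the combined encoding this patch moves to: the operation lives in the low bits of one 32-bit value and the flags sit above it, so a single unsigned int can be passed around and tested. The bit widths, op numbers, and flag values below are assumptions made up for the example; they are not the kernel's actual REQ_OP_* / REQ_* definitions, and sample_op_is_sync() only mirrors the shape of op_is_sync(), not its exact code.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Simplified model of the combined encoding: the low OP_BITS bits hold
     * the operation, everything above them holds the flags.  All values
     * here are illustrative only.
     */
    #define OP_BITS   8
    #define OP_MASK   ((1u << OP_BITS) - 1)

    enum { OP_READ = 0, OP_WRITE = 1, OP_FLUSH = 2 };   /* assumed op numbers */
    #define FLAG_SYNC (1u << (OP_BITS + 0))             /* assumed flag bits  */
    #define FLAG_META (1u << (OP_BITS + 1))

    /* Same idea as op_is_sync(): reads count as synchronous, writes only
     * when the sync flag is set. */
    static bool sample_op_is_sync(unsigned int op)
    {
        return (op & OP_MASK) == OP_READ || (op & FLAG_SYNC);
    }

    int main(void)
    {
        /* One 32-bit value now carries both the op and its flags. */
        unsigned int cmd_flags = OP_WRITE | FLAG_SYNC | FLAG_META;

        printf("op=%u sync=%d\n",
               cmd_flags & OP_MASK, sample_op_is_sync(cmd_flags));
        return 0;
    }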
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c    | 60
-rw-r--r--	block/blk-flush.c   |  2
-rw-r--r--	block/blk-lib.c     |  2
-rw-r--r--	block/blk-map.c     |  2
-rw-r--r--	block/blk-mq.c      | 28
-rw-r--r--	block/cfq-iosched.c | 66
-rw-r--r--	block/elevator.c    |  4
7 files changed, 69 insertions(+), 95 deletions(-)
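The blk-core.c hunk below also adds build-time checks that the op bits and flag bits together still fit in the 32-bit cmd_flags and bi_opf fields. A standalone sketch of the same kind of check follows; the struct, field, and bit widths are assumptions for the example (the kernel uses REQ_OP_BITS, REQ_FLAG_BITS, BUILD_BUG_ON, and FIELD_SIZEOF), and BUILD_CHECK() is just a hand-rolled stand-in that breaks the build via a negative array size when the condition is true.

    #include <stdint.h>

    /* Illustrative widths only. */
    #define OP_BITS   8
    #define FLAG_BITS 20

    struct sample_request {
        uint32_t cmd_flags;     /* stands in for struct request::cmd_flags */
    };

    /* Compile-time assertion: array size becomes negative if cond is true. */
    #define BUILD_CHECK(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    void sample_checks(void)
    {
        /* Op number space plus flag space must fit in the 32-bit field. */
        BUILD_CHECK(OP_BITS + FLAG_BITS >
                    8 * sizeof(((struct sample_request *)0)->cmd_flags));
    }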
diff --git a/block/blk-core.c b/block/blk-core.c
index fd416651a676..0bfaa54d3e9f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1056,8 +1056,7 @@ static struct io_context *rq_ioc(struct bio *bio)
/**
* __get_request - get a free request
* @rl: request list to allocate from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1068,23 +1067,22 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *__get_request(struct request_list *rl, int op,
- int op_flags, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, unsigned int op,
+ struct bio *bio, gfp_t gfp_mask)
{
struct request_queue *q = rl->q;
struct request *rq;
struct elevator_type *et = q->elevator->type;
struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
- const bool is_sync = rw_is_sync(op, op_flags) != 0;
+ const bool is_sync = op_is_sync(op);
int may_queue;
req_flags_t rq_flags = RQF_ALLOCED;
if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV);
- may_queue = elv_may_queue(q, op, op_flags);
+ may_queue = elv_may_queue(q, op);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
@@ -1154,7 +1152,7 @@ static struct request *__get_request(struct request_list *rl, int op,
blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
- req_set_op_attrs(rq, op, op_flags);
+ rq->cmd_flags = op;
rq->rq_flags = rq_flags;
/* init elvpriv */
@@ -1232,8 +1230,7 @@ rq_starved:
/**
* get_request - get a free request
* @q: request_queue to allocate request from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1244,18 +1241,17 @@ rq_starved:
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request(struct request_queue *q, int op,
- int op_flags, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, unsigned int op,
+ struct bio *bio, gfp_t gfp_mask)
{
- const bool is_sync = rw_is_sync(op, op_flags) != 0;
+ const bool is_sync = op_is_sync(op);
DEFINE_WAIT(wait);
struct request_list *rl;
struct request *rq;
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
- rq = __get_request(rl, op, op_flags, bio, gfp_mask);
+ rq = __get_request(rl, op, bio, gfp_mask);
if (!IS_ERR(rq))
return rq;
@@ -1297,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
- rq = get_request(q, rw, 0, NULL, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
return rq;
@@ -1446,7 +1442,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
*/
if (rq_flags & RQF_ALLOCED) {
struct request_list *rl = blk_rq_rl(req);
- bool sync = rw_is_sync(req_op(req), req->cmd_flags);
+ bool sync = op_is_sync(req->cmd_flags);
BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req));
@@ -1652,8 +1648,6 @@ out:
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
-
- req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
@@ -1665,9 +1659,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
- const bool sync = !!(bio->bi_opf & REQ_SYNC);
struct blk_plug *plug;
- int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
+ int el_ret, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
@@ -1723,23 +1716,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
get_rq:
/*
- * This sync check and mask will be re-done in init_request_from_bio(),
- * but we need to set it earlier to expose the sync flag to the
- * rq allocator and io schedulers.
- */
- if (sync)
- rw_flags |= REQ_SYNC;
-
- /*
- * Add in META/PRIO flags, if set, before we get to the IO scheduler
- */
- rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
-
- /*
* Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked.
*/
- req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
+ req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
if (IS_ERR(req)) {
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
@@ -2946,8 +2926,6 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- req_set_op(rq, bio_op(bio));
-
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -3031,8 +3009,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
- req_set_op_attrs(dst, req_op(src),
- (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
+ dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
@@ -3537,8 +3514,11 @@ EXPORT_SYMBOL(blk_set_runtime_active);
int __init blk_dev_init(void)
{
- BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+ BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+ BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
FIELD_SIZEOF(struct request, cmd_flags));
+ BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
+ FIELD_SIZEOF(struct bio, bi_opf));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3990b9cfbda5..95f1d4d357df 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
}
flush_rq->cmd_type = REQ_TYPE_FS;
- req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH);
+ flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH;
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 46fe9248410d..18abda862915 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,7 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
unsigned int granularity;
- enum req_op op;
+ unsigned int op;
int alignment;
sector_t bs_mask;
diff --git a/block/blk-map.c b/block/blk-map.c
index 2c5ae5fef473..0173a72a8aa9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,6 +16,8 @@
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
if (!rq->bio) {
+ rq->cmd_flags &= REQ_OP_MASK;
+ rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
blk_rq_bio_prep(rq->q, rq, bio);
} else {
if (!ll_back_merge_fn(rq->q, rq, bio))
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b49c6658eb05..2da1a0ee3318 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -139,14 +139,13 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
EXPORT_SYMBOL(blk_mq_can_queue);
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
- struct request *rq, int op,
- unsigned int op_flags)
+ struct request *rq, unsigned int op)
{
INIT_LIST_HEAD(&rq->queuelist);
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = q;
rq->mq_ctx = ctx;
- req_set_op_attrs(rq, op, op_flags);
+ rq->cmd_flags = op;
if (blk_queue_io_stat(q))
rq->rq_flags |= RQF_IO_STAT;
/* do not touch atomic flags, it needs atomic ops against the timer */
@@ -183,11 +182,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->end_io_data = NULL;
rq->next_rq = NULL;
- ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
+ ctx->rq_dispatched[op_is_sync(op)]++;
}
static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
{
struct request *rq;
unsigned int tag;
@@ -202,7 +201,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
}
rq->tag = tag;
- blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
+ blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
return rq;
}
@@ -225,7 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
blk_mq_put_ctx(ctx);
if (!rq) {
@@ -277,7 +276,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+ rq = __blk_mq_alloc_request(&alloc_data, rw);
if (!rq) {
ret = -EWOULDBLOCK;
goto out_queue_exit;
@@ -1196,19 +1195,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct request *rq;
- int op = bio_data_dir(bio);
- int op_flags = 0;
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);
- if (rw_is_sync(bio_op(bio), bio->bi_opf))
- op_flags |= REQ_SYNC;
-
- trace_block_getrq(q, bio, op);
+ trace_block_getrq(q, bio, bio->bi_opf);
blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
- rq = __blk_mq_alloc_request(data, op, op_flags);
+ rq = __blk_mq_alloc_request(data, bio->bi_opf);
data->hctx->queued++;
return rq;
@@ -1256,7 +1250,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
*/
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_mq_alloc_data data;
struct request *rq;
@@ -1350,7 +1344,7 @@ done:
*/
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_plug *plug;
unsigned int request_count = 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5e24d880306c..c96186adaa66 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -667,10 +667,10 @@ static inline void cfqg_put(struct cfq_group *cfqg)
} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, int op,
- int op_flags)
+ struct cfq_group *curr_cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
+ blkg_rwstat_add(&cfqg->stats.queued, op, 1);
cfqg_stats_end_empty_time(&cfqg->stats);
cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}
@@ -684,30 +684,29 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
#endif
}
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
- int op_flags)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
+ blkg_rwstat_add(&cfqg->stats.queued, op, -1);
}
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
- int op_flags)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+ unsigned int op)
{
- blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
+ blkg_rwstat_add(&cfqg->stats.merged, op, 1);
}
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- uint64_t start_time, uint64_t io_start_time, int op,
- int op_flags)
+ uint64_t start_time, uint64_t io_start_time,
+ unsigned int op)
{
struct cfqg_stats *stats = &cfqg->stats;
unsigned long long now = sched_clock();
if (time_after64(now, io_start_time))
- blkg_rwstat_add(&stats->service_time, op, op_flags,
- now - io_start_time);
+ blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
if (time_after64(io_start_time, start_time))
- blkg_rwstat_add(&stats->wait_time, op, op_flags,
+ blkg_rwstat_add(&stats->wait_time, op,
io_start_time - start_time);
}
@@ -786,16 +785,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, int op, int op_flags) { }
+ struct cfq_group *curr_cfqg, unsigned int op) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
uint64_t time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
- int op_flags) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
- int op_flags) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+ unsigned int op) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+ unsigned int op) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- uint64_t start_time, uint64_t io_start_time, int op,
- int op_flags) { }
+ uint64_t start_time, uint64_t io_start_time,
+ unsigned int op) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -2474,10 +2473,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
- req_op(rq), rq->cmd_flags);
+ rq->cmd_flags);
}
static struct request *
@@ -2530,7 +2529,7 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
@@ -2565,7 +2564,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
}
static void
@@ -2588,7 +2587,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
- cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
+ cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
cfqq = RQ_CFQQ(next);
/*
@@ -4142,7 +4141,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
- cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
rq->cmd_flags);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -4240,8 +4239,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
- rq_io_start_time_ns(rq), req_op(rq),
- rq->cmd_flags);
+ rq_io_start_time_ns(rq), rq->cmd_flags);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -4319,14 +4317,14 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_schedule_dispatch(cfqd);
}
-static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
+static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
{
/*
* If REQ_PRIO is set, boost class and prio level, if it's below
* BE/NORM. If prio is not set, restore the potentially boosted
* class/prio level.
*/
- if (!(op_flags & REQ_PRIO)) {
+ if (!(op & REQ_PRIO)) {
cfqq->ioprio_class = cfqq->org_ioprio_class;
cfqq->ioprio = cfqq->org_ioprio;
} else {
@@ -4347,7 +4345,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
+static int cfq_may_queue(struct request_queue *q, unsigned int op)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -4364,10 +4362,10 @@ static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
if (!cic)
return ELV_MQUEUE_MAY;
- cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
+ cfqq = cic_to_cfqq(cic, op_is_sync(op));
if (cfqq) {
cfq_init_prio_data(cfqq, cic);
- cfqq_boost_on_prio(cfqq, op_flags);
+ cfqq_boost_on_prio(cfqq, op);
return __cfq_may_queue(cfqq);
}
diff --git a/block/elevator.c b/block/elevator.c
index ac80f89a0842..a18a5db274e4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -714,12 +714,12 @@ void elv_put_request(struct request_queue *q, struct request *rq)
e->type->ops.elevator_put_req_fn(rq);
}
-int elv_may_queue(struct request_queue *q, int op, int op_flags)
+int elv_may_queue(struct request_queue *q, unsigned int op)
{
struct elevator_queue *e = q->elevator;
if (e->type->ops.elevator_may_queue_fn)
- return e->type->ops.elevator_may_queue_fn(q, op, op_flags);
+ return e->type->ops.elevator_may_queue_fn(q, op);
return ELV_MQUEUE_MAY;
}