author     Jens Axboe <axboe@suse.de>                  2006-07-28 09:32:57 +0200
committer  Jens Axboe <axboe@nelson.home.kernel.dk>    2006-09-30 20:29:23 +0200
commit     cb78b285c8f9d59b0d4e4f6a54c2977ce1d9b880 (patch)
tree       fec08d228e25103962c56f8cd43727d9d6af23c8 /block
parent     [PATCH] Remove ->rq_status from struct request (diff)
[PATCH] Drop useless bio passing in may_queue/set_request API
It's not needed for anything, so kill the bio passing.

Signed-off-by: Jens Axboe <axboe@suse.de>
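The change in the hunks below is purely mechanical: the unused struct bio * argument is dropped from the may_queue/set_request elevator hooks, so the function-pointer signature, each scheduler's implementation (AS, CFQ), and every caller in elevator.c and ll_rw_blk.c change in lockstep. As a hedged illustration of that ops-table pattern, here is a standalone userspace sketch; sched_ops, demo_may_queue and toy_elv_may_queue are made-up names for this example, not kernel code.

/*
 * Toy userspace sketch of the pattern in this patch (not kernel code):
 * dropping an unused argument from an ops-table hook means changing the
 * function-pointer type, every implementation, and every call site at once.
 */
#include <stdio.h>

struct queue;                          /* stand-in for request_queue_t */

/* New-style hook: only the queue and the data direction, no bio. */
typedef int (may_queue_fn)(struct queue *q, int rw);

struct sched_ops {
	may_queue_fn *may_queue;       /* plays the role of elevator_may_queue_fn */
};

struct queue {
	const struct sched_ops *ops;
};

/* One scheduler's implementation of the slimmed-down hook. */
static int demo_may_queue(struct queue *q, int rw)
{
	(void)q;
	printf("may_queue(rw=%d): allowing\n", rw);
	return 1;                      /* think ELV_MQUEUE_MAY */
}

/* Generic dispatcher, analogous to elv_may_queue() in block/elevator.c. */
static int toy_elv_may_queue(struct queue *q, int rw)
{
	if (q->ops && q->ops->may_queue)
		return q->ops->may_queue(q, rw);
	return 1;
}

int main(void)
{
	const struct sched_ops ops = { .may_queue = demo_may_queue };
	struct queue q = { .ops = &ops };

	/* Exit 0 when the "scheduler" says the request may be queued. */
	return toy_elv_may_queue(&q, 0) ? 0 : 1;
}

With the bio argument gone from the hook type, dispatchers and callers such as elv_may_queue() and get_request() simply stop forwarding the bio, which is exactly what the diff below does.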
Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c    2
-rw-r--r--  block/cfq-iosched.c   5
-rw-r--r--  block/elevator.c      9
-rw-r--r--  block/ll_rw_blk.c     9
4 files changed, 11 insertions, 14 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 02eb9333898f..66015bc79e6f 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1284,7 +1284,7 @@ static void as_work_handler(void *data)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
+static int as_may_queue(request_queue_t *q, int rw)
{
int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c5fd9c2c205..2ac35aacbbf9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1752,7 +1752,7 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
+static int cfq_may_queue(request_queue_t *q, int rw)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -1817,8 +1817,7 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
- gfp_t gfp_mask)
+cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
diff --git a/block/elevator.c b/block/elevator.c
index 924b81b08f86..788d2d81994c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -796,13 +796,12 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
return NULL;
}
-int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
- gfp_t gfp_mask)
+int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_set_req_fn)
- return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
+ return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
rq->elevator_private = NULL;
return 0;
@@ -816,12 +815,12 @@ void elv_put_request(request_queue_t *q, struct request *rq)
e->ops->elevator_put_req_fn(q, rq);
}
-int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
+int elv_may_queue(request_queue_t *q, int rw)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_may_queue_fn)
- return e->ops->elevator_may_queue_fn(q, rw, bio);
+ return e->ops->elevator_may_queue_fn(q, rw);
return ELV_MQUEUE_MAY;
}
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b94a396aa624..b1ea941f6dc3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2003,8 +2003,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
}
static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
- int priv, gfp_t gfp_mask)
+blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -2018,7 +2017,7 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
rq->cmd_flags = rw | REQ_ALLOCED;
if (priv) {
- if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+ if (unlikely(elv_set_request(q, rq, gfp_mask))) {
mempool_free(rq, q->rq.rq_pool);
return NULL;
}
@@ -2109,7 +2108,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
struct io_context *ioc = NULL;
int may_queue, priv;
- may_queue = elv_may_queue(q, rw, bio);
+ may_queue = elv_may_queue(q, rw);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
@@ -2157,7 +2156,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
+ rq = blk_alloc_request(q, rw, priv, gfp_mask);
if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything