author	Christoph Hellwig <hch@lst.de>	2017-01-03 12:52:44 +0100
committer	Jens Axboe <axboe@fb.com>	2017-01-27 23:08:35 +0100
commit	5ea708d15a928f7a479987704203616d3274c03b (patch)
tree	0b1595694cc628134ccb108f256beed8831887bf /block
parent	block: fix elevator init check (diff)
block: simplify blk_init_allocated_queue
Return an errno value instead of the passed in queue so that the callers don't have to keep track of two queues, and move the assignment of the request_fn and lock to the caller, as passing them as arguments doesn't simplify anything. While we're at it, also remove two pointless NULL assignments, given that the request_queue structure is zeroed on allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
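The diff below shows how blk_init_queue_node() itself adopts the new convention. For any other caller of blk_init_allocated_queue(), the pattern after this patch would look roughly like the following sketch; my_init_queue, my_request_fn and my_lock are placeholder names used only for illustration and do not appear in the patch:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical driver request_fn, named here only for the example. */
static void my_request_fn(struct request_queue *q)
{
	/* dequeue and process requests here */
}

/* Hypothetical driver-private lock; passing a lock is optional. */
static DEFINE_SPINLOCK(my_lock);

static struct request_queue *my_init_queue(int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!q)
		return NULL;

	/* request_fn and (optionally) the queue lock are now set by the caller */
	q->request_fn = my_request_fn;
	q->queue_lock = &my_lock;

	/* blk_init_allocated_queue() now returns 0 or a negative errno */
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}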
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	38
1 file changed, 15 insertions(+), 23 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 4cad535592c3..09819d24d385 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -823,15 +823,19 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-	struct request_queue *uninit_q, *q;
+	struct request_queue *q;
 
-	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
-	if (!uninit_q)
+	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	if (!q)
 		return NULL;
 
-	q = blk_init_allocated_queue(uninit_q, rfn, lock);
-	if (!q)
-		blk_cleanup_queue(uninit_q);
+	q->request_fn = rfn;
+	if (lock)
+		q->queue_lock = lock;
+	if (blk_init_allocated_queue(q) < 0) {
+		blk_cleanup_queue(q);
+		return NULL;
+	}
 
 	return q;
 }
@@ -839,30 +843,19 @@ EXPORT_SYMBOL(blk_init_queue_node);
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
-struct request_queue *
-blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
-			 spinlock_t *lock)
-{
-	if (!q)
-		return NULL;
+int blk_init_allocated_queue(struct request_queue *q)
+{
 
 	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
 	if (!q->fq)
-		return NULL;
+		return -ENOMEM;
 
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
 		goto fail;
 
 	INIT_WORK(&q->timeout_work, blk_timeout_work);
-	q->request_fn		= rfn;
-	q->prep_rq_fn		= NULL;
-	q->unprep_rq_fn		= NULL;
 	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
 
-	/* Override internal queue lock with supplied lock pointer */
-	if (lock)
-		q->queue_lock		= lock;
-
 	/*
 	 * This also sets hw/phys segments, boundary and size
 	 */
@@ -880,13 +873,12 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	}
 
 	mutex_unlock(&q->sysfs_lock);
-
-	return q;
+	return 0;
 
 fail:
 	blk_free_flush_queue(q->fq);
 	wbt_exit(q);
-	return NULL;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);