summary | refs | log | tree | commit | diff | stats
path: root/block/blk-core.c
diff options
context:
space:
mode:
author    Mike Snitzer <snitzer@redhat.com>  2010-05-25 19:15:15 +0200
committer Jens Axboe <jaxboe@fusionio.com>   2010-06-04 13:47:06 +0200
commit1abec4fdbb142e3ccb6ce99832fae42129134a96 (patch)
tree5adffee38a35b3639282da545846171715e9d689 /block/blk-core.c
parentblock: avoid unconditionally freeing previously allocated request_queue (diff)
downloadlinux-1abec4fdbb142e3ccb6ce99832fae42129134a96.tar.xz
linux-1abec4fdbb142e3ccb6ce99832fae42129134a96.zip
block: make blk_init_free_list and elevator_init idempotent
blk_init_allocated_queue_node may fail and the caller _could_ retry. Accommodate the unlikely event that blk_init_allocated_queue_node is called on an already initialized (possibly partially) request_queue. Signed-off-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 826d07078902..f84cce42fc58 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -467,6 +467,9 @@ static int blk_init_free_list(struct request_queue *q)
{
struct request_list *rl = &q->rq;
+ if (unlikely(rl->rq_pool))
+ return 0;
+
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
rl->elvpriv = 0;