| field | value | date |
|---|---|---|
| author | Adrian Hunter <adrian.hunter@intel.com> | 2016-11-29 11:09:15 +0100 |
| committer | Ulf Hansson <ulf.hansson@linaro.org> | 2016-12-05 10:31:07 +0100 |
| commit | c5bda0ca6fab8e040c8ea3c71fdd16deee0f132f | |
| tree | d1cf7272bc96b59f524048e8788397dc898c76f2 /drivers/mmc | |
| parent | mmc: queue: Factor out mmc_queue_reqs_free_bufs() | |
mmc: queue: Introduce queue depth and use it to allocate and free
Add a mmc_queue member to record the size of the queue, which currently
supports 2 requests on-the-go at a time. Instead of allocating resources
for 2 slots in the queue, allow for an arbitrary number.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
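
For orientation before the diff, here is a minimal userspace sketch of the allocation pattern the patch introduces: the fixed pair of request slots becomes a qdepth-sized array, allocation walks the array, and a partial failure is unwound slot by slot. The structure and function names mirror the kernel code, but `SLOT_BUF_SIZE` and the use of plain `malloc()`/`calloc()` are illustrative stand-ins, not the kernel API.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOT_BUF_SIZE 65536	/* stand-in for the bounce buffer size */

struct mmc_queue_req {
	void *bounce_buf;
};

struct mmc_queue {
	struct mmc_queue_req *mqrq;	/* was: struct mmc_queue_req mqrq[2] */
	int qdepth;
};

/* Mirrors the shape of mmc_queue_alloc_bounce_bufs(): allocate one buffer
 * per slot, rolling back already-allocated slots if any allocation fails. */
static bool queue_alloc_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = malloc(SLOT_BUF_SIZE);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}
	return true;

out_err:
	while (--i >= 0) {
		free(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	return false;
}

/* Mirrors mmc_queue_reqs_free_bufs(): free every slot, whatever qdepth is. */
static void queue_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		free(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
}

int main(void)
{
	struct mmc_queue mq = { .qdepth = 2 };	/* the patch keeps the depth at 2 for now */

	mq.mqrq = calloc(mq.qdepth, sizeof(*mq.mqrq));
	if (!mq.mqrq)
		return 1;

	if (queue_alloc_bufs(&mq))
		printf("allocated %d slots\n", mq.qdepth);

	queue_free_bufs(&mq);
	free(mq.mqrq);
	return 0;
}
```

The kernel version does the same walk with kmalloc()/kfree() and reports the failure with pr_warn(), as shown in mmc_queue_alloc_bounce_bufs() in the diff below.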
Diffstat (limited to 'drivers/mmc')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/mmc/card/queue.c | 115 |
| -rw-r--r-- | drivers/mmc/card/queue.h | 3 |
2 files changed, 59 insertions, 59 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 8ba82cf5feff..32ab5a1947b0 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -190,87 +190,76 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
 					unsigned int bouncesz)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-
-	mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-	if (!mqrq_cur->bounce_buf) {
-		pr_warn("%s: unable to allocate bounce cur buffer\n",
-			mmc_card_name(mq->card));
-		return false;
-	}
+	int i;
 
-	mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-	if (!mqrq_prev->bounce_buf) {
-		pr_warn("%s: unable to allocate bounce prev buffer\n",
-			mmc_card_name(mq->card));
-		kfree(mqrq_cur->bounce_buf);
-		mqrq_cur->bounce_buf = NULL;
-		return false;
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mq->mqrq[i].bounce_buf)
+			goto out_err;
 	}
 
 	return true;
+
+out_err:
+	while (--i >= 0) {
+		kfree(mq->mqrq[i].bounce_buf);
+		mq->mqrq[i].bounce_buf = NULL;
+	}
+	pr_warn("%s: unable to allocate bounce buffers\n",
+		mmc_card_name(mq->card));
+	return false;
 }
 
 static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
 				      unsigned int bouncesz)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-	int ret;
-
-	mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-	if (ret)
-		return ret;
-
-	mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-	if (ret)
-		return ret;
+	int i, ret;
 
-	mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-	if (ret)
-		return ret;
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+		if (ret)
+			return ret;
 
-	mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+		if (ret)
+			return ret;
+	}
 
-	return ret;
+	return 0;
 }
 #endif
 
 static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-	int ret;
+	int i, ret;
 
-	mqrq_cur->sg = mmc_alloc_sg(max_segs, &ret);
-	if (ret)
-		return ret;
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+		if (ret)
+			return ret;
+	}
 
-	mqrq_prev->sg = mmc_alloc_sg(max_segs, &ret);
+	return 0;
+}
 
-	return ret;
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
 }
 
 static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
 {
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
-
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
-
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
+	int i;
+
+	for (i = 0; i < mq->qdepth; i++)
+		mmc_queue_req_free_bufs(&mq->mqrq[i]);
 }
 
 /**
@@ -288,7 +277,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	bool bounce = false;
-	int ret;
+	int ret = -ENOMEM;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -298,6 +287,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
+	mq->qdepth = 2;
+	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
+			   GFP_KERNEL);
+	if (!mq->mqrq)
+		goto blk_cleanup;
 	mq->mqrq_cur = &mq->mqrq[0];
 	mq->mqrq_prev = &mq->mqrq[1];
 	mq->queue->queuedata = mq;
@@ -362,6 +356,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 cleanup_queue:
 	mmc_queue_reqs_free_bufs(mq);
+	kfree(mq->mqrq);
+	mq->mqrq = NULL;
+blk_cleanup:
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -384,6 +381,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	mmc_queue_reqs_free_bufs(mq);
+	kfree(mq->mqrq);
+	mq->mqrq = NULL;
 
 	mq->card = NULL;
 }
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d09fce655800..dac8c3d010dd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -42,9 +42,10 @@ struct mmc_queue {
 	bool			asleep;
 	struct mmc_blk_data	*blkdata;
 	struct request_queue	*queue;
-	struct mmc_queue_req	mqrq[2];
+	struct mmc_queue_req	*mqrq;
 	struct mmc_queue_req	*mqrq_cur;
 	struct mmc_queue_req	*mqrq_prev;
+	int			qdepth;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
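
The other structural change worth noting is the error unwinding in mmc_init_queue(): because the slot array is now allocated separately from the request queue, the patch defaults ret to -ENOMEM and adds a second label, blk_cleanup, that tears down only the queue. The following userspace model exercises the same two-stage unwind; make_blk_queue(), destroy_blk_queue(), init_queue() and the fail_slot_setup flag are hypothetical stand-ins for the kernel calls, used only to make the sketch runnable.

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for blk_init_queue()/blk_cleanup_queue(). */
static void *make_blk_queue(void)      { return malloc(64); }
static void destroy_blk_queue(void *q) { free(q); }

struct mmc_queue_req { void *bounce_buf; };

struct mmc_queue {
	void *queue;
	struct mmc_queue_req *mqrq;
	int qdepth;
};

/* Models the post-patch mmc_init_queue() unwind order: a failure once the
 * block queue exists jumps to blk_cleanup; a failure after the slot array
 * exists jumps to cleanup_queue so the array is freed first. */
static int init_queue(struct mmc_queue *mq, bool fail_slot_setup)
{
	int ret = -ENOMEM;

	mq->queue = make_blk_queue();
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = calloc(mq->qdepth, sizeof(*mq->mqrq));
	if (!mq->mqrq)
		goto blk_cleanup;

	if (fail_slot_setup)		/* stands in for bounce/sg setup failing */
		goto cleanup_queue;

	return 0;

cleanup_queue:
	free(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	destroy_blk_queue(mq->queue);
	mq->queue = NULL;
	return ret;
}

int main(void)
{
	struct mmc_queue mq = { 0 };

	printf("simulated failure: %d\n", init_queue(&mq, true));	/* -ENOMEM */
	printf("success path:      %d\n", init_queue(&mq, false));	/* 0 */

	free(mq.mqrq);
	destroy_blk_queue(mq.queue);
	return 0;
}
```

In the simulated failure the function returns with nothing left allocated, which is the invariant the added blk_cleanup label preserves in the kernel version.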