author     Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>   2019-09-12 06:13:55 +0200
committer  Christoph Hellwig <hch@lst.de>                         2019-09-12 14:14:09 +0200
commit     427b00342c5a3ebcf31fac2ce3b21fb993952816 (patch)
tree       5b2a6de70bcea202ed9d516e1fc82aef6b4cb0b7 /drivers/mmc
parent     arm64: use asm-generic/dma-mapping.h (diff)
mmc: queue: Fix bigger segments usage
The commit 38c38cb73223 ("mmc: queue: use bigger segments if DMA MAP layer can merge the segments") always enables the bigger segments when the DMA MAP layer can merge them, but some controllers (e.g. SDHCI) have strict limitations on segment size, so that commit breaks on such controllers. To fix the issue, this patch adds a new flag, MMC_CAP2_MERGE_CAPABLE, to struct mmc_host, and leaves the bigger segments usage disabled by default.

Reported-by: Thierry Reding <treding@nvidia.com>
Fixes: 38c38cb73223 ("mmc: queue: use bigger segments if DMA MAP layer can merge the segments")
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Acked-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
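For context, after this change a host controller driver has to advertise explicitly that its DMA path copes with merged segments before the queue code will use them. The following is a minimal sketch of such an opt-in from a driver's probe routine; the example_merge_probe function and the probe flow around it are illustrative assumptions, not part of this patch, while MMC_CAP2_MERGE_CAPABLE, struct mmc_host and host->caps2 are what the patch introduces or relies on.

/* Sketch only: opting a hypothetical host driver into segment merging. */
#include <linux/errno.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>

static int example_merge_probe(struct platform_device *pdev)
{
	struct mmc_host *host;
	int ret;

	/* No private data is needed for this illustration, so extra = 0. */
	host = mmc_alloc_host(0, &pdev->dev);
	if (!host)
		return -ENOMEM;

	/*
	 * Declare that the controller has no strict per-segment size limit,
	 * so mmc_init_queue() may set host->can_dma_map_merge when the DMA
	 * mapping layer reports a merge boundary.
	 */
	host->caps2 |= MMC_CAP2_MERGE_CAPABLE;

	ret = mmc_add_host(host);
	if (ret)
		mmc_free_host(host);
	return ret;
}

Drivers for controllers with strict segment-size limits (such as SDHCI, per the commit message) simply leave the flag unset and keep the previous behaviour.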
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/core/queue.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 1e29b305767e..9edc08685e86 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -399,6 +399,11 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&mq->wait);
}
+static inline bool mmc_merge_capable(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
+}
+
/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64
@@ -441,7 +446,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
* the host->can_dma_map_merge should be set before to get max_segs
* from mmc_get_max_segments().
*/
- if (host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
+ if (mmc_merge_capable(host) &&
+ host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
dma_get_merge_boundary(mmc_dev(host)))
host->can_dma_map_merge = 1;
else