author    Christoph Hellwig <hch@lst.de>    2021-11-09 15:50:28 +0100
committer Christoph Hellwig <hch@lst.de>    2021-12-08 16:46:35 +0100
commit    28e4576d556bca543b0996e9edd4b767397e24c6 (patch)
tree      071f859a8ec6385f0b80ad3c1ed2a11b73cffdaf /kernel/dma
parent    dma-direct: factor the swiotlb code out of __dma_direct_alloc_pages (diff)
dma-direct: add a dma_direct_use_pool helper
Add a helper to check if a potentially blocking operation should dip
into the atomic pools.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Diffstat (limited to 'kernel/dma')
 kernel/dma/direct.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 924937c54e8a..50f48e9e4598 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -156,6 +156,15 @@ again:
 	return page;
 }
 
+/*
+ * Check if a potentially blocking operation needs to dip into the atomic
+ * pools for the given device/gfp.
+ */
+static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
+{
+	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
+}
+
 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
 {
@@ -235,8 +244,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	 */
 	remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
 	if (remap) {
-		if (!gfpflags_allow_blocking(gfp) &&
-		    !is_swiotlb_for_alloc(dev))
+		if (dma_direct_use_pool(dev, gfp))
 			return dma_direct_alloc_from_pool(dev, size,
 					dma_handle, gfp);
 	} else {
@@ -250,8 +258,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	 * Decrypting memory may block, so allocate the memory from the atomic
 	 * pools if we can't block.
 	 */
-	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-	    !is_swiotlb_for_alloc(dev))
+	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
@@ -360,8 +367,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-	    !is_swiotlb_for_alloc(dev))
+	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
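
For readers outside the kernel tree, the sketch below shows the refactoring in isolation: the same two-part test that previously appeared verbatim at three call sites is collapsed into one predicate. This is a minimal user-space illustration, not kernel code; struct device, the stub gfpflags_allow_blocking() and is_swiotlb_for_alloc(), and the __GFP_DIRECT_RECLAIM definition are simplified stand-ins, and only the body of dma_direct_use_pool() mirrors the commit.

/*
 * Standalone sketch (not kernel code): hoisting a compound predicate
 * that was duplicated at three call sites into a single helper.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* In the kernel, blocking is allowed iff __GFP_DIRECT_RECLAIM is set. */
#define __GFP_DIRECT_RECLAIM 0x400u

struct device {
	bool swiotlb_for_alloc;	/* stand-in for the restricted-pool flag */
};

/* Simplified stand-in for the kernel's gfpflags_allow_blocking(). */
static bool gfpflags_allow_blocking(gfp_t gfp)
{
	return gfp & __GFP_DIRECT_RECLAIM;
}

/* Simplified stand-in for the kernel's is_swiotlb_for_alloc(). */
static bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->swiotlb_for_alloc;
}

/*
 * The helper introduced by the commit: use the atomic pools only when
 * the caller cannot block and the device does not allocate from a
 * restricted swiotlb pool.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

int main(void)
{
	struct device dev = { .swiotlb_for_alloc = false };

	/* GFP_ATOMIC-style flags (no direct reclaim): pool is used. */
	printf("atomic:   use pool = %d\n", dma_direct_use_pool(&dev, 0));
	/* GFP_KERNEL-style flags (direct reclaim allowed): no pool. */
	printf("blocking: use pool = %d\n",
	       dma_direct_use_pool(&dev, __GFP_DIRECT_RECLAIM));
	return 0;
}

With the predicate in one place, any future change to the criteria for dipping into the atomic pools touches a single function instead of three open-coded conditions, which is the point of the commit.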