author	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-03 02:18:45 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-03 02:18:45 +0200
commit	2fcd2b306aa80771e053275ed74b2dfe7e3d1434 (patch)
tree	b19f4e3e9552eab00056c833650e692192fe8f5c /lib
parent	Merge branch 'sched-wait-for-linus' of git://git.kernel.org/pub/scm/linux/ker... (diff)
parent	dma-mapping: Don't clear GFP_ZERO in dma_alloc_attrs (diff)
Merge branch 'x86-dma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 dma mapping updates from Ingo Molnar:
 "This tree, by Christoph Hellwig, switches over the x86 architecture
  to the generic dma-direct and swiotlb code, and also unifies more of
  the dma-direct code between architectures. The now unused x86-only
  primitives are removed"

* 'x86-dma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  dma-mapping: Don't clear GFP_ZERO in dma_alloc_attrs
  swiotlb: Make swiotlb_{alloc,free}_buffer depend on CONFIG_DMA_DIRECT_OPS
  dma/swiotlb: Remove swiotlb_{alloc,free}_coherent()
  dma/direct: Handle force decryption for DMA coherent buffers in common code
  dma/direct: Handle the memory encryption bit in common code
  dma/swiotlb: Remove swiotlb_set_mem_attributes()
  set_memory.h: Provide set_memory_{en,de}crypted() stubs
  x86/dma: Remove dma_alloc_coherent_gfp_flags()
  iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()
  iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}()
  x86/dma/amd_gart: Use dma_direct_{alloc,free}()
  x86/dma/amd_gart: Look at dev->coherent_dma_mask instead of GFP_DMA
  x86/dma: Use generic swiotlb_ops
  x86/dma: Use DMA-direct (CONFIG_DMA_DIRECT_OPS=y)
  x86/dma: Remove dma_alloc_coherent_mask()
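For context on the driver-visible effect of this merge, here is a minimal, illustrative sketch (not part of this commit; the helper names are hypothetical) of how a device driver allocates and frees a coherent DMA buffer. On x86 this path now resolves to the generic dma_direct_alloc()/dma_direct_free() code changed in lib/dma-direct.c below:

#include <linux/dma-mapping.h>
#include <linux/device.h>

/*
 * Illustrative helper: allocate a coherent buffer for DMA. After this
 * merge, x86 routes this through dma_direct_alloc() (or the swiotlb
 * bounce-buffer path), which zeroes the buffer itself and, when AMD SEV
 * is active, remaps it decrypted via set_memory_decrypted().
 */
static void *example_dma_buffer_alloc(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

/*
 * Illustrative helper: release the buffer. dma_direct_free() re-encrypts
 * the pages under SEV before handing them back to the page allocator.
 */
static void example_dma_buffer_free(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}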
Diffstat (limited to 'lib')
-rw-r--r--	lib/dma-direct.c	35
-rw-r--r--	lib/swiotlb.c	77
2 files changed, 46 insertions, 66 deletions
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c9e8e21cb334..c0bba30fef0a 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -9,6 +9,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
+#include <linux/set_memory.h>
#define DIRECT_MAPPING_ERROR 0
@@ -20,6 +21,14 @@
#define ARCH_ZONE_DMA_BITS 24
#endif
+/*
+ * For AMD SEV all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+ return sev_active();
+}
+
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
const char *caller)
@@ -37,7 +46,9 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
- return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+ dma_addr_t addr = force_dma_unencrypted() ?
+ __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+ return addr + size - 1 <= dev->coherent_dma_mask;
}
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -46,6 +57,10 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
int page_order = get_order(size);
struct page *page = NULL;
+ void *ret;
+
+ /* we always manually zero the memory once we are done: */
+ gfp &= ~__GFP_ZERO;
/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
@@ -78,10 +93,15 @@ again:
if (!page)
return NULL;
-
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- memset(page_address(page), 0, size);
- return page_address(page);
+ ret = page_address(page);
+ if (force_dma_unencrypted()) {
+ set_memory_decrypted((unsigned long)ret, 1 << page_order);
+ *dma_handle = __phys_to_dma(dev, page_to_phys(page));
+ } else {
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ }
+ memset(ret, 0, size);
+ return ret;
}
/*
@@ -92,9 +112,12 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned int page_order = get_order(size);
+ if (force_dma_unencrypted())
+ set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
- free_pages((unsigned long)cpu_addr, get_order(size));
+ free_pages((unsigned long)cpu_addr, page_order);
}
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c43ec2271469..47aeb04c1997 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
+#include <linux/set_memory.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -156,22 +157,6 @@ unsigned long swiotlb_size_or_default(void)
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
-void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
-
-/* For swiotlb, clear memory encryption mask from dma addresses */
-static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
- phys_addr_t address)
-{
- return __sme_clr(phys_to_dma(hwdev, address));
-}
-
-/* Note that this doesn't work with highmem page */
-static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
- volatile void *address)
-{
- return phys_to_dma(hwdev, virt_to_phys(address));
-}
-
static bool no_iotlb_memory;
void swiotlb_print_info(void)
@@ -209,12 +194,12 @@ void __init swiotlb_update_mem_attributes(void)
vaddr = phys_to_virt(io_tlb_start);
bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
- swiotlb_set_mem_attributes(vaddr, bytes);
+ set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
memset(vaddr, 0, bytes);
vaddr = phys_to_virt(io_tlb_overflow_buffer);
bytes = PAGE_ALIGN(io_tlb_overflow);
- swiotlb_set_mem_attributes(vaddr, bytes);
+ set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
memset(vaddr, 0, bytes);
}
@@ -355,7 +340,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
io_tlb_start = virt_to_phys(tlb);
io_tlb_end = io_tlb_start + bytes;
- swiotlb_set_mem_attributes(tlb, bytes);
+ set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
memset(tlb, 0, bytes);
/*
@@ -366,7 +351,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!v_overflow_buffer)
goto cleanup2;
- swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
+ set_memory_decrypted((unsigned long)v_overflow_buffer,
+ io_tlb_overflow >> PAGE_SHIFT);
memset(v_overflow_buffer, 0, io_tlb_overflow);
io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
@@ -622,7 +608,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
return SWIOTLB_MAP_ERROR;
}
- start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
+ start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
dir, attrs);
}
@@ -706,6 +692,7 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
}
}
+#ifdef CONFIG_DMA_DIRECT_OPS
static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
size_t size)
{
@@ -726,12 +713,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
goto out_warn;
phys_addr = swiotlb_tbl_map_single(dev,
- swiotlb_phys_to_dma(dev, io_tlb_start),
+ __phys_to_dma(dev, io_tlb_start),
0, size, DMA_FROM_DEVICE, 0);
if (phys_addr == SWIOTLB_MAP_ERROR)
goto out_warn;
- *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+ *dma_handle = __phys_to_dma(dev, phys_addr);
if (dma_coherent_ok(dev, *dma_handle, size))
goto out_unmap;
@@ -759,28 +746,6 @@ out_warn:
return NULL;
}
-void *
-swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
-{
- int order = get_order(size);
- unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
- void *ret;
-
- ret = (void *)__get_free_pages(flags, order);
- if (ret) {
- *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
- if (dma_coherent_ok(hwdev, *dma_handle, size)) {
- memset(ret, 0, size);
- return ret;
- }
- free_pages((unsigned long)ret, order);
- }
-
- return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
-}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-
static bool swiotlb_free_buffer(struct device *dev, size_t size,
dma_addr_t dma_addr)
{
@@ -799,15 +764,7 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
DMA_ATTR_SKIP_CPU_SYNC);
return true;
}
-
-void
-swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr)
-{
- if (!swiotlb_free_buffer(hwdev, size, dev_addr))
- free_pages((unsigned long)vaddr, get_order(size));
-}
-EXPORT_SYMBOL(swiotlb_free_coherent);
+#endif
static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
@@ -867,10 +824,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
map = map_single(dev, phys, size, dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
swiotlb_full(dev, size, dir, 1);
- return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+ return __phys_to_dma(dev, io_tlb_overflow_buffer);
}
- dev_addr = swiotlb_phys_to_dma(dev, map);
+ dev_addr = __phys_to_dma(dev, map);
/* Ensure that the address returned is DMA'ble */
if (dma_capable(dev, dev_addr, size))
@@ -879,7 +836,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
- return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
+ return __phys_to_dma(dev, io_tlb_overflow_buffer);
}
/*
@@ -1009,7 +966,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
sg_dma_len(sgl) = 0;
return 0;
}
- sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
+ sg->dma_address = __phys_to_dma(hwdev, map);
} else
sg->dma_address = dev_addr;
sg_dma_len(sg) = sg->length;
@@ -1073,7 +1030,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
- return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
+ return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
/*
@@ -1085,7 +1042,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+ return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
#ifdef CONFIG_DMA_DIRECT_OPS