author | Halil Pasic <pasic@linux.ibm.com> | 2018-09-13 18:57:16 +0200
committer | Heiko Carstens <heiko.carstens@de.ibm.com> | 2019-06-15 12:24:51 +0200
commit | 64e1f0c531d1072cd97939bf0d8df42b26713543 (patch)
tree | e98f6f8de50fd7bf6f68627f558469c3b3f3915b /arch/s390/mm
parent | s390/crypto: sha: Use -ENODEV instead of -EOPNOTSUPP (diff)
download | linux-64e1f0c531d1072cd97939bf0d8df42b26713543.tar.xz / linux-64e1f0c531d1072cd97939bf0d8df42b26713543.zip
s390/mm: force swiotlb for protected virtualization
On s390, protected virtualization guests have to use bounce buffers for I/O.
That requires some plumbing.
Let us make sure that any device using the DMA API with direct ops is spared
the problems that a hypervisor attempting I/O to a non-shared page would bring.
Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
Tested-by: Michael Mueller <mimu@linux.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
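For orientation (not part of this patch): the reason the hooks below are all the plumbing needed is that the generic DMA code already keys off exactly these functions. The sketch that follows approximates the generic-side consumers in kernel/dma/direct.c and kernel/dma/swiotlb.c from kernels of this period; details may differ from the real sources.

/*
 * Approximate sketch of the generic-side consumers of the hooks added
 * by this patch (not part of the patch itself; based on
 * kernel/dma/direct.c and kernel/dma/swiotlb.c around this time).
 */

/* DMA-direct asks whether device-visible memory must be "unencrypted".
 * With this patch, s390 answers yes for protected virtualization guests
 * via sev_active(). */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

/* Once the bounce buffer pool exists, swiotlb makes it accessible to the
 * hypervisor by calling the architecture hook; on s390 this lands in the
 * new set_memory_decrypted(), which shares the pages with the host. */
void __init swiotlb_update_mem_attributes(void)
{
	void *vaddr = phys_to_virt(io_tlb_start);
	unsigned long bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);

	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}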
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- | arch/s390/mm/init.c | 47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/swiotlb.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
@@ -29,6 +30,7 @@
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
+#include <linux/dma-mapping.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -42,6 +44,8 @@
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
 #include <asm/kasan.h>
+#include <asm/dma-mapping.h>
+#include <asm/uv.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -128,6 +132,47 @@ void mark_rodata_ro(void)
 	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }
 
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+	int i;
+
+	/* make specified pages unshared, (swiotlb, dma_free) */
+	for (i = 0; i < numpages; ++i) {
+		uv_remove_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+	int i;
+	/* make specified pages shared (swiotlb, dma_alloca) */
+	for (i = 0; i < numpages; ++i) {
+		uv_set_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+/* are we a protected virtualization guest? */
+bool sev_active(void)
+{
+	return is_prot_virt_guest();
+}
+
+/* protected virtualization */
+static void pv_init(void)
+{
+	if (!is_prot_virt_guest())
+		return;
+
+	/* make sure bounce buffers are shared */
+	swiotlb_init(1);
+	swiotlb_update_mem_attributes();
+	swiotlb_force = SWIOTLB_FORCE;
+}
+
 void __init mem_init(void)
 {
 	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
 	set_max_mapnr(max_low_pfn);
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
+	pv_init();
+
 	/* Setup guest page hinting */
 	cmma_init();
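The uv_set_shared()/uv_remove_shared() helpers used above come from <asm/uv.h> (pulled in by the new include), introduced by the accompanying protected-virtualization patches. Roughly, each one issues an Ultravisor call that flips the share state of a single page; the sketch below only illustrates their shape and is an approximation, not the actual header contents.

/*
 * Illustrative approximation of the <asm/uv.h> helpers used by
 * set_memory_encrypted()/set_memory_decrypted() above; see the real
 * header for the exact definitions.
 */
static inline int share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,		/* set or remove shared access */
		.header.len = sizeof(uvcb),
		.paddr = addr,			/* address of the page to (un)share */
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	/* sharing is page wise; uv_call() executes the Ultravisor call */
	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}

static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}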