author | Paul Durrant <paul.durrant@citrix.com> | 2018-05-09 15:16:12 +0200 |
---|---|---|
committer | Juergen Gross <jgross@suse.com> | 2018-05-14 15:25:37 +0200 |
commit | 3ad0876554cafa368f574d4d408468510543e9ff (patch) | |
tree | 7fade80924630151265270e68355d3f68b029bad /arch/x86/xen | |
parent | xen: Change return type to vm_fault_t (diff) | |
xen/privcmd: add IOCTL_PRIVCMD_MMAP_RESOURCE
My recent Xen patch series introduces a new HYPERVISOR_memory_op to
support direct priv-mapping of certain guest resources (such as ioreq
pages, used by emulators) by a tools domain, rather than having to access
such resources via the guest P2M.
This patch adds the necessary infrastructure to the privcmd driver and
Xen MMU code to support direct resource mapping.
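
For illustration, a tools-domain process would reach this path roughly as follows. This is a minimal sketch, assuming the privcmd uapi header (xen/privcmd.h) and the resource-type constant from the Xen public headers introduced alongside this series; map_ioreq_pages() is a hypothetical helper, not code from the patch:

```c
/*
 * Hypothetical tools-domain snippet: directly map the ioreq server
 * pages of a guest, bypassing the guest P2M.  Assumes the uapi header
 * added by this series (xen/privcmd.h) provides
 * IOCTL_PRIVCMD_MMAP_RESOURCE and struct privcmd_mmap_resource.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/privcmd.h>

/* From xen/interface/memory.h (assumption: value unchanged). */
#define XENMEM_resource_ioreq_server 0

static void *map_ioreq_pages(int domid, uint32_t server_id, uint64_t nr_frames)
{
	size_t size = nr_frames * 4096;	/* assuming 4K pages */
	int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
	struct privcmd_mmap_resource mr = {
		.dom  = domid,
		.type = XENMEM_resource_ioreq_server,
		.id   = server_id,	/* which ioreq server */
		.idx  = 0,		/* first frame of the resource */
		.num  = nr_frames,
	};
	void *addr;

	if (fd < 0)
		return NULL;

	/* Reserve a privcmd-backed VA range; Xen fills it in below. */
	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		close(fd);
		return NULL;
	}

	mr.addr = (uint64_t)(uintptr_t)addr;
	if (ioctl(fd, IOCTL_PRIVCMD_MMAP_RESOURCE, &mr)) {
		perror("IOCTL_PRIVCMD_MMAP_RESOURCE");
		munmap(addr, size);
		addr = NULL;
	}

	close(fd);	/* the vma keeps the mapping alive */
	return addr;
}
```

The mmap() only reserves the VA range against privcmd; it is the subsequent ioctl that asks Xen to install the resource frames, via the remap code touched below.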
NOTE: The adjustment in the MMU code is partially cosmetic. Xen will now
allow a PV tools domain to map guest pages either by GFN or MFN, thus
the term 'mfn' has been swapped for 'pfn' in the lower layers of the
remap code.
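
As a sketch of what that GFN/MFN choice looks like to a kernel caller (map_frames() is an invented wrapper for illustration; the two exported helpers and their signatures are those declared in include/xen/xen-ops.h and shown in the diff below):

```c
/*
 * Hypothetical wrapper: both helpers now funnel into do_remap_pfn(),
 * differing only in the no_translate flag passed down to the
 * MMU-update layer.  Assumes <xen/xen-ops.h> for the declarations.
 */
static int map_frames(struct vm_area_struct *vma, unsigned long addr,
		      xen_pfn_t *frames, int nr, int *errs,
		      unsigned int domid, struct page **pages, bool by_mfn)
{
	if (by_mfn)
		/* PV tools domain: frames are raw MFNs, no P2M lookup. */
		return xen_remap_domain_mfn_array(vma, addr, frames, nr,
						  errs, vma->vm_page_prot,
						  domid, pages);

	/* Default path: frames are GFNs, translated via the guest P2M. */
	return xen_remap_domain_gfn_array(vma, addr, frames, nr, errs,
					  vma->vm_page_prot, domid, pages);
}
```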
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r-- | arch/x86/xen/mmu.c | 60 |
1 file changed, 43 insertions(+), 17 deletions(-)
```diff
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d33e7dbe3129..af2960cb7a3e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -65,37 +65,44 @@ static void xen_flush_tlb_all(void)
 
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
-	xen_pfn_t *mfn;
+	xen_pfn_t *pfn;
 	bool contiguous;
+	bool no_translate;
 	pgprot_t prot;
 	struct mmu_update *mmu_update;
 };
 
-static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
 				 unsigned long addr, void *data)
 {
 	struct remap_data *rmd = data;
-	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
 
-	/* If we have a contiguous range, just update the mfn itself,
-	   else update pointer to be "next mfn". */
+	/*
+	 * If we have a contiguous range, just update the pfn itself,
+	 * else update pointer to be "next pfn".
+	 */
 	if (rmd->contiguous)
-		(*rmd->mfn)++;
+		(*rmd->pfn)++;
 	else
-		rmd->mfn++;
+		rmd->pfn++;
 
-	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+	rmd->mmu_update->ptr |= rmd->no_translate ?
+		MMU_PT_UPDATE_NO_TRANSLATE :
+		MMU_NORMAL_PT_UPDATE;
 	rmd->mmu_update->val = pte_val_ma(pte);
 	rmd->mmu_update++;
 
 	return 0;
 }
 
-static int do_remap_gfn(struct vm_area_struct *vma,
+static int do_remap_pfn(struct vm_area_struct *vma,
 			unsigned long addr,
-			xen_pfn_t *gfn, int nr,
+			xen_pfn_t *pfn, int nr,
 			int *err_ptr, pgprot_t prot,
-			unsigned domid,
+			unsigned int domid,
+			bool no_translate,
 			struct page **pages)
 {
 	int err = 0;
@@ -106,11 +113,14 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-	rmd.mfn = gfn;
+	rmd.pfn = pfn;
 	rmd.prot = prot;
-	/* We use the err_ptr to indicate if there we are doing a contiguous
-	 * mapping or a discontigious mapping. */
+	/*
+	 * We use the err_ptr to indicate if there we are doing a contiguous
+	 * mapping or a discontigious mapping.
+	 */
 	rmd.contiguous = !err_ptr;
+	rmd.no_translate = no_translate;
 
 	while (nr) {
 		int index = 0;
@@ -121,7 +131,7 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 
 		rmd.mmu_update = mmu_update;
 		err = apply_to_page_range(vma->vm_mm, addr, range,
-					  remap_area_mfn_pte_fn, &rmd);
+					  remap_area_pfn_pte_fn, &rmd);
 		if (err)
 			goto out;
@@ -175,7 +185,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return -EOPNOTSUPP;
 
-	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
+	return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+			    pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
 
@@ -194,10 +205,25 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 	 * cause of "wrong memory was mapped in".
 	 */
 	BUG_ON(err_ptr == NULL);
-	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
+	return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+			    false, pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       xen_pfn_t *mfn, int nr,
+			       int *err_ptr, pgprot_t prot,
+			       unsigned int domid, struct page **pages)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return -EOPNOTSUPP;
+
+	return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+			    true, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
 /* Returns: 0 success */
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
```
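
For context, a paraphrase of the batch-submission step that follows the last hunk above (the real loop in do_remap_pfn() additionally retries and records per-page errors; submit_pte_batch() is an invented name for illustration):

```c
/*
 * Hypothetical helper: flush one batch of PTE updates to Xen.  Each
 * entry's .ptr holds virt_to_machine(ptep).maddr OR'd with either
 * MMU_NORMAL_PT_UPDATE (interpret the PTE's frame as a GFN of @domid)
 * or MMU_PT_UPDATE_NO_TRANSLATE (treat it as a raw MFN); .val holds
 * the new PTE value.
 */
static int submit_pte_batch(struct mmu_update *updates, int count,
			    unsigned int domid)
{
	int done = 0;

	return HYPERVISOR_mmu_update(updates, count, &done, domid);
}
```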