author     Alex Williamson <alex.williamson@redhat.com>  2015-02-06 18:59:16 +0100
committer  Alex Williamson <alex.williamson@redhat.com>  2015-02-06 18:59:16 +0100
commit     babbf1760970f141eb4021288ce0fb7196bc1a23 (patch)
tree       00f84b04dce039df80ed367ca195293ba8194ab3 /drivers/vfio
parent     vfio/type1: DMA unmap chunking (diff)
vfio/type1: Chunk contiguous reserved/invalid page mappings
We currently map invalid and reserved pages, such as often occur from mapping MMIO regions of a VM through the IOMMU, using single pages. There's really no reason we can't instead follow the methodology we use for normal pages and find the largest possible physically contiguous chunk for mapping. The only difference is that we don't do locked memory accounting for these since they're not backed by RAM. In most applications this will be a very minor improvement, but when graphics and GPGPU devices are in play, MMIO BARs become non-trivial.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
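To make the chunking idea concrete, below is a minimal userspace C model of the logic the patch applies inside vfio_pin_pages(): starting from a base pfn, count how many following pfns are both physically contiguous and share the same reserved/invalid status, and charge locked-memory accounting only for pages that are actually backed by RAM. The names here (struct fake_page, chunk_pages, locked_vm as a plain counter) are illustrative stand-ins, not the kernel's; the real code is in the diff that follows.

/*
 * Simplified, self-contained model of the chunking logic: reserved pfns
 * (e.g. an MMIO BAR) are chunked just like normal pages, but are never
 * charged against the RLIMIT_MEMLOCK-style limit.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	unsigned long pfn;
	bool reserved;		/* models is_invalid_reserved_pfn() */
};

/* Returns the size of the contiguous chunk beginning at pages[0]. */
static long chunk_pages(const struct fake_page *pages, long npage,
			long *locked_vm, long limit)
{
	bool rsvd = pages[0].reserved;
	long i;

	for (i = 1; i < npage; i++) {
		/* Stop when contiguity or reserved-ness changes. */
		if (pages[i].pfn != pages[0].pfn + i ||
		    pages[i].reserved != rsvd)
			break;
		/* Only RAM-backed pages count against the lock limit. */
		if (!rsvd && *locked_vm + i + 1 > limit)
			break;
	}

	if (!rsvd)
		*locked_vm += i;	/* models vfio_lock_acct(i) */
	return i;
}

int main(void)
{
	/* Three contiguous reserved pfns (e.g. an MMIO BAR), then a gap. */
	struct fake_page pages[] = {
		{ 0x100, true }, { 0x101, true }, { 0x102, true },
		{ 0x200, true },
	};
	long locked_vm = 0;

	printf("chunk size: %ld\n", chunk_pages(pages, 4, &locked_vm, 16));
	printf("locked_vm:  %ld\n", locked_vm);	/* stays 0: reserved pages */
	return 0;
}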
Diffstat (limited to 'drivers/vfio')
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c  18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index e6e7f155bdd9..35c90089478e 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -265,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
bool lock_cap = capable(CAP_IPC_LOCK);
long ret, i;
+ bool rsvd;
if (!current->mm)
return -ENODEV;
@@ -273,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
if (ret)
return ret;
- if (is_invalid_reserved_pfn(*pfn_base))
- return 1;
+ rsvd = is_invalid_reserved_pfn(*pfn_base);
- if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+ if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
put_pfn(*pfn_base, prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
limit << PAGE_SHIFT);
@@ -284,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
}
if (unlikely(disable_hugepages)) {
- vfio_lock_acct(1);
+ if (!rsvd)
+ vfio_lock_acct(1);
return 1;
}
@@ -296,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
if (ret)
break;
- if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+ if (pfn != *pfn_base + i ||
+ rsvd != is_invalid_reserved_pfn(pfn)) {
put_pfn(pfn, prot);
break;
}
- if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
+ if (!rsvd && !lock_cap &&
+ current->mm->locked_vm + i + 1 > limit) {
put_pfn(pfn, prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
__func__, limit << PAGE_SHIFT);
@@ -309,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
}
}
- vfio_lock_acct(i);
+ if (!rsvd)
+ vfio_lock_acct(i);
return i;
}
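For context, here is a hedged sketch (not the literal type1 mapping loop) of how a caller can consume the chunk size that vfio_pin_pages() returns: the whole physically contiguous run is handed to the IOMMU in a single iommu_map() call instead of one call per 4K page. The locals domain, iova, vaddr, left, prot and ret are assumed to exist in the surrounding mapping function.

/*
 * Illustrative caller fragment: pin the largest contiguous chunk, then
 * map it with one IOMMU operation, RAM or MMIO alike.
 */
while (left) {
	unsigned long pfn;
	long npage = vfio_pin_pages(vaddr, left, prot, &pfn);

	if (npage <= 0)
		return npage ? npage : -EFAULT;

	/* One map call covers the entire contiguous chunk. */
	ret = iommu_map(domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
			npage << PAGE_SHIFT, prot);
	if (ret) {
		vfio_unpin_pages(pfn, npage, prot, true);
		return ret;
	}

	iova  += npage << PAGE_SHIFT;
	vaddr += npage << PAGE_SHIFT;
	left  -= npage;
}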