| author | David Woodhouse <David.Woodhouse@intel.com> | 2009-07-04 10:35:44 +0200 |
|---|---|---|
| committer | David Woodhouse <David.Woodhouse@intel.com> | 2009-07-04 10:35:52 +0200 |
| commit | 5a5e02a614e59db7536cd11029e6674adc41b191 (patch) | |
| tree | 11f56f4ca2ea02f14e5cdfaae89e034db23edb28 /drivers/pci/intel-iommu.c | |
| parent | Merge git://git.infradead.org/iommu-2.6 (diff) | |
| download | linux-5a5e02a614e59db7536cd11029e6674adc41b191.tar.xz linux-5a5e02a614e59db7536cd11029e6674adc41b191.zip | |
intel-iommu: Fix dma vs. mm page confusion with aligned_nrpages()
The aligned_nrpages() function rounds up to the next VM page, but
returns its result as a number of DMA pages.
Purely theoretical except on IA64, which doesn't boot with VT-d right
now anyway.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
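
The unit mix-up is easiest to see with concrete numbers. Below is a small stand-alone sketch (not kernel code) that re-implements the old and new aligned_nrpages() under an assumed IA64-like configuration of 16 KiB MM pages and 4 KiB VT-d pages; the macro names only mirror the kernel's, and the values are illustrative.

```c
/*
 * Stand-alone illustration of the old vs. new aligned_nrpages() -- not
 * kernel code.  The page sizes are assumptions chosen to mimic an
 * IA64-like setup (16 KiB MM pages, 4 KiB VT-d pages).
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	14			/* hypothetical 16 KiB MM page */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

#define VTD_PAGE_SHIFT	12			/* 4 KiB IOMMU (DMA) page */

/* Old code: rounds up by PAGE_SIZE but never masks, so stray low bits
 * between VTD_PAGE_SHIFT and PAGE_SHIFT inflate the VT-d page count. */
static unsigned long aligned_nrpages_old(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;
	host_addr += size + PAGE_SIZE - 1;
	return host_addr >> VTD_PAGE_SHIFT;
}

/* Fixed code: align to an MM page boundary first, then express the
 * result as a count of VT-d pages. */
static unsigned long aligned_nrpages_new(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* 4097 bytes: one byte more than a single VT-d page. */
	printf("old: %lu VT-d pages\n", aligned_nrpages_old(0, 4097));	/* 5 */
	printf("new: %lu VT-d pages\n", aligned_nrpages_new(0, 4097));	/* 4 */
	return 0;
}
```

With a 4097-byte buffer the old code reports 5 VT-d pages while the fixed code reports 4, i.e. one 16 KiB MM page rounded up and then expressed in 4 KiB units.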
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r-- | drivers/pci/intel-iommu.c | 12 |
1 files changed, 7 insertions, 5 deletions
```diff
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 53075424a434..ad85e95d2dcc 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2368,15 +2368,15 @@ error:
 	return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
 					    size_t size)
 {
 	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-
-	return host_addr >> VTD_PAGE_SHIFT;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
@@ -2506,7 +2506,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
 
@@ -2797,7 +2798,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
```
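
The hunks above also change both call sites so that the VT-d page count coming out of aligned_nrpages() is converted before being handed to intel_alloc_iova(), which counts MM pages. Below is a minimal user-space sketch of that conversion, reusing the hypothetical 16 KiB / 4 KiB page sizes from the earlier example; dma_to_mm_pfn() here simply performs the shift the kernel helper of that name is used for, and the surrounding code is illustrative only.

```c
/*
 * Caller-side sketch only: intel_alloc_iova() counts MM pages, while
 * aligned_nrpages() returns VT-d pages, so the callers now convert with
 * dma_to_mm_pfn().  Page sizes are the same hypothetical 16 KiB / 4 KiB
 * pair as above; this is not the kernel's actual IOVA allocator.
 */
#include <stdio.h>

#define PAGE_SHIFT	14	/* hypothetical 16 KiB MM page */
#define VTD_PAGE_SHIFT	12	/* 4 KiB VT-d page */

/* Convert a count of VT-d (DMA) pages into a count of MM pages. */
static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	/* aligned_nrpages() result for a 32 KiB mapping: 8 VT-d pages.
	 * Because that value is now aligned to the MM page size, the
	 * shift below loses nothing. */
	unsigned long vtd_pages = 8;

	/* Before the fix, the raw value 8 went straight to the IOVA
	 * allocator and was treated as 8 MM pages (128 KiB); after the
	 * fix the request is the intended 2 MM pages (32 KiB). */
	printf("IOVA request: %lu MM pages\n", dma_to_mm_pfn(vtd_pages));
	return 0;
}
```

On x86, where the MM and VT-d page sizes coincide, the shift is zero and nothing changes, which is why the problem was purely theoretical outside IA64.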