author		Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 01:43:27 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 01:43:27 +0100
commit		2cd83ba5bede2f72cc6c79a19a1bddf576b50e88
tree		6a02f6f93f90f3fea419c3a283ced0543b603fd4 /drivers/iommu/amd_iommu.c
parent		Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
parent		Merge branches 'iommu/arm/smmu', 'iommu/updates', 'iommu/vt-d', 'iommu/ipmmu-...
Merge tag 'iommu-v4.15-rc1' of git://github.com/awilliam/linux-vfio
Pull IOMMU updates from Alex Williamson:
"As Joerg mentioned[1], he's out on paternity leave through the end of
the year and I'm filling in for him in the interim:
- Enforce MSI multiple IRQ alignment in AMD IOMMU (sketched just after
this summary)
- VT-d PASID error handling fixes
- Add r8a7795 IPMMU support
- Manage runtime PM links on exynos at {add,remove}_device callbacks
- Fix Mediatek driver name to avoid conflict
- Add terminate support to qcom fault handler
- 64-bit IOVA optimizations
- Simplify IOVA domain destruction, better use of rcache, and skip
anchor nodes on copy
- Convert to IOMMU TLB sync API in io-pgtable-arm{-v7s}
- Drop command queue lock when waiting for CMD_SYNC completion on ARM
SMMU implementations supporting MSI to cacheable memory
- ipmmu-vmsa cleanup inspired by missed IOTLB sync callbacks
- Fix sleeping lock with preemption disabled for RT
- Dual MMU support for TI DRA7xx DSPs
- Optional flush option on IOVA allocation avoiding overhead when
caller can try other options
[1] https://lkml.org/lkml/2017/10/22/72"
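The MSI alignment item above is easiest to see in isolation: multi-MSI requires the base interrupt index to be aligned to the vector count rounded up to a power of two, which is exactly what the alloc_irq_index() change in the diff at the bottom of this page enforces. Below is a minimal userspace sketch of that scan, assuming a plain bool array in place of the real irq_remap_table and simplified stand-ins for the kernel's roundup_pow_of_two() and ALIGN() helpers (MAX_IRQS_PER_TABLE is 256 in amd_iommu, used here only as an array bound):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_IRQS_PER_TABLE 256
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Simplified stand-in for the kernel helper of the same name. */
static size_t roundup_pow_of_two(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/*
 * Find `count` consecutive free slots; when `align` is set, the run
 * must start at a multiple of roundup_pow_of_two(count), as multi-MSI
 * requires. Returns the start index, or -1 where the kernel code
 * returns -ENOSPC.
 */
static int alloc_index(const bool *allocated, size_t count, bool align)
{
	size_t alignment = align ? roundup_pow_of_two(count) : 1;
	size_t index, c;

	for (index = ALIGN(0, alignment), c = 0;
	     index < MAX_IRQS_PER_TABLE;) {
		if (!allocated[index]) {
			c += 1;
		} else {
			/* Run broken: restart at the next aligned slot. */
			c = 0;
			index = ALIGN(index + 1, alignment);
			continue;
		}
		if (c == count)
			return (int)(index - count + 1);
		index++;
	}
	return -1;
}

int main(void)
{
	bool table[MAX_IRQS_PER_TABLE] = { [1] = true };

	/* Slot 1 is taken, so an aligned 4-vector block must start at 4. */
	printf("index = %d\n", alloc_index(table, 4, true));
	return 0;
}

The behavioral point mirrored here is that a broken run restarts at the next aligned slot rather than at the very next index, so any run that completes necessarily began on an aligned boundary.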
* tag 'iommu-v4.15-rc1' of git://github.com/awilliam/linux-vfio: (54 commits)
iommu/iova: Use raw_cpu_ptr() instead of get_cpu_ptr() for ->fq
iommu/mediatek: Fix driver name
iommu/ipmmu-vmsa: Hook up r8a7795 DT matching code
iommu/ipmmu-vmsa: Allow two bit SL0
iommu/ipmmu-vmsa: Make IMBUSCTR setup optional
iommu/ipmmu-vmsa: Write IMCTR twice
iommu/ipmmu-vmsa: IPMMU device is 40-bit bus master
iommu/ipmmu-vmsa: Make use of IOMMU_OF_DECLARE()
iommu/ipmmu-vmsa: Enable multi context support
iommu/ipmmu-vmsa: Add optional root device feature
iommu/ipmmu-vmsa: Introduce features, break out alias
iommu/ipmmu-vmsa: Unify ipmmu_ops
iommu/ipmmu-vmsa: Clean up struct ipmmu_vmsa_iommu_priv
iommu/ipmmu-vmsa: Simplify group allocation
iommu/ipmmu-vmsa: Unify domain alloc/free
iommu/ipmmu-vmsa: Fix return value check in ipmmu_find_group_dma()
iommu/vt-d: Clear pasid table entry when memory unbound
iommu/vt-d: Clear Page Request Overflow fault bit
iommu/vt-d: Missing checks for pasid tables if allocation fails
iommu/amd: Limit the IOVA page range to the specified addresses
...
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--	drivers/iommu/amd_iommu.c	43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 9c848e36f209..7d5eb004091d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -63,7 +63,6 @@
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN		(1)
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 
 /* Reserved IOVA ranges */
 #define MSI_RANGE_START		(0xfee00000)
@@ -1547,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,
 
 	if (dma_mask > DMA_BIT_MASK(32))
 		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
-				      IOVA_PFN(DMA_BIT_MASK(32)));
+				      IOVA_PFN(DMA_BIT_MASK(32)), false);
 
 	if (!pfn)
-		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				      IOVA_PFN(dma_mask), true);
 
 	return (pfn << PAGE_SHIFT);
 }
@@ -1788,8 +1788,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 
-	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
 
 	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
 		goto free_dma_dom;
@@ -2383,11 +2382,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 			   size_t size,
 			   int dir)
 {
-	dma_addr_t flush_addr;
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	flush_addr = dma_addr;
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
@@ -2696,8 +2693,7 @@ static int init_reserved_iova_ranges(void)
 	struct pci_dev *pdev = NULL;
 	struct iova *val;
 
-	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
 
 	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
 			  &reserved_rbtree_key);
@@ -3155,7 +3151,7 @@ static void amd_iommu_apply_resv_region(struct device *dev,
 	unsigned long start, end;
 
 	start = IOVA_PFN(region->start);
-	end   = IOVA_PFN(region->start + region->length);
+	end   = IOVA_PFN(region->start + region->length - 1);
 
 	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
@@ -3663,11 +3659,11 @@ out_unlock:
 	return table;
 }
 
-static int alloc_irq_index(u16 devid, int count)
+static int alloc_irq_index(u16 devid, int count, bool align)
 {
 	struct irq_remap_table *table;
+	int index, c, alignment = 1;
 	unsigned long flags;
-	int index, c;
 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 
 	if (!iommu)
@@ -3677,16 +3673,21 @@ static int alloc_irq_index(u16 devid, int count)
 	if (!table)
 		return -ENODEV;
 
+	if (align)
+		alignment = roundup_pow_of_two(count);
+
 	spin_lock_irqsave(&table->lock, flags);
 
 	/* Scan table for free entries */
-	for (c = 0, index = table->min_index;
-	     index < MAX_IRQS_PER_TABLE;
-	     ++index) {
-		if (!iommu->irte_ops->is_allocated(table, index))
+	for (index = ALIGN(table->min_index, alignment), c = 0;
+	     index < MAX_IRQS_PER_TABLE;) {
+		if (!iommu->irte_ops->is_allocated(table, index)) {
 			c += 1;
-		else
-			c = 0;
+		} else {
+			c = 0;
+			index = ALIGN(index + 1, alignment);
+			continue;
+		}
 
 		if (c == count) {
 			for (; c != 0; --c)
@@ -3695,6 +3696,8 @@ static int alloc_irq_index(u16 devid, int count)
 			index -= count - 1;
 			goto out;
 		}
+
+		index++;
 	}
 
 	index = -ENOSPC;
@@ -4099,7 +4102,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		else
 			ret = -ENOMEM;
 	} else {
-		index = alloc_irq_index(devid, nr_irqs);
+		bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+
+		index = alloc_irq_index(devid, nr_irqs, align);
 	}
 
 	if (index < 0) {
 		pr_warn("Failed to allocate IRTE\n");
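The alloc_iova_fast() hunks above are the "optional flush" item from the summary: the first pass, constrained below 4 GiB, passes false so a miss does not flush the per-CPU rcache, because the caller still has a cheaper retry available; only the final full-mask pass pays for the flush. A minimal userspace sketch of that two-pass pattern, with try_alloc() as a hypothetical stand-in for the real allocator:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for alloc_iova_fast(): here we simply pretend
 * the low space is exhausted unless the caller permits a cache flush.
 */
static unsigned long try_alloc(unsigned long pages, unsigned long limit_pfn,
			       bool flush_rcache)
{
	if (!flush_rcache)
		return 0;		/* simulate an rcache-limited miss */
	return limit_pfn - pages;	/* simulate success near the limit */
}

/* Two-pass fallback mirroring dma_ops_alloc_iova() after this merge. */
static unsigned long alloc_pfn(unsigned long pages,
			       unsigned long mask_pfn, unsigned long pfn_4g)
{
	unsigned long pfn = 0;

	/* First try below 4 GiB and skip the costly per-CPU cache flush:
	 * a miss here is cheap because a wider retry is still available. */
	if (mask_pfn > pfn_4g)
		pfn = try_alloc(pages, pfn_4g, false);

	/* Last resort: the full DMA mask, now paying for the flush. */
	if (!pfn)
		pfn = try_alloc(pages, mask_pfn, true);

	return pfn;
}

int main(void)
{
	printf("pfn = 0x%lx\n", alloc_pfn(8, 0xffffffffUL, 0xfffffUL));
	return 0;
}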
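The amd_iommu_apply_resv_region() hunk fixes a classic off-by-one: reserve_iova() treats its end PFN as inclusive, so for a page-aligned region the old arithmetic reserved one page beyond the requested range. A small worked example (PAGE_SHIFT fixed at 12 and the window values chosen purely for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12	/* illustrative 4 KiB pages */
#define IOVA_PFN(addr) ((unsigned long)(addr) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start  = 0xfee00000UL;	/* e.g. an MSI window base */
	unsigned long length = 0x100000UL;	/* 1 MiB, page aligned */

	/* Old: for a page-aligned region this is the first PFN *past* it. */
	printf("old end pfn: 0x%lx\n", IOVA_PFN(start + length));

	/* New: backing off one byte keeps the inclusive end inside. */
	printf("new end pfn: 0x%lx\n", IOVA_PFN(start + length - 1));
	return 0;
}

Here the old expression yields PFN 0xfef00 while the new one yields 0xfeeff, so the old code handed reserve_iova() exactly one PFN too many.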