author | Jiang Liu <jiang.liu@linux.intel.com> | 2014-02-19 07:07:37 +0100 |
---|---|---|
committer | Joerg Roedel <joro@8bytes.org> | 2014-03-04 17:51:06 +0100 |
commit | 75f05569d0e51f6332a291c82abbeb7c8262e32d (patch) | |
tree | 24eeca4b270e4f0c3ca857c1993b109e685f1588 /drivers/iommu/iova.c | |
parent | iommu/vt-d: Unify the way to process DMAR device scope array (diff) | |
iommu/vt-d: Update IOMMU state when memory hotplug happens
If a static identity domain has been created, the IOMMU driver needs to update
the si_domain page table when a memory hotplug event happens. Otherwise
PCI device DMA operations can't access the hot-added memory regions.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
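The diff shown on this page is limited to iova.c, so the hotplug handling itself is not visible here. As a rough illustration only, a memory-hotplug notifier along the following lines could drive the si_domain update; `si_domain_identity_map_range()` and `si_domain_identity_unmap_range()` are placeholder names invented for this sketch, while `register_memory_notifier()`, `struct memory_notify` and the `MEM_*` actions are the standard kernel interfaces.

```c
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/mm.h>

/* Placeholder helpers standing in for the driver's si_domain page-table updates. */
int si_domain_identity_map_range(unsigned long long start, unsigned long long end);
void si_domain_identity_unmap_range(unsigned long long start, unsigned long long end);

static int si_domain_memory_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct memory_notify *mhp = data;
	unsigned long long start = (unsigned long long)mhp->start_pfn << PAGE_SHIFT;
	unsigned long long end =
		((unsigned long long)(mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* Identity-map the hot-added range so device DMA can reach it. */
		if (si_domain_identity_map_range(start, end))
			return NOTIFY_BAD;
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		/* Tear the mapping back down when the memory goes away. */
		si_domain_identity_unmap_range(start, end);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block si_domain_memory_nb = {
	.notifier_call	= si_domain_memory_notifier,
	.priority	= 0,
};

/* Registered once at driver init: register_memory_notifier(&si_domain_memory_nb); */
```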
Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r-- | drivers/iommu/iova.c | 64 |
1 file changed, 58 insertions(+), 6 deletions(-)
```diff
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 67da6cff74e8..f6b17e6af2fb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
 	return 0;
 }
 
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct iova *iova;
+
+	iova = alloc_iova_mem();
+	if (iova) {
+		iova->pfn_lo = pfn_lo;
+		iova->pfn_hi = pfn_hi;
+	}
+
+	return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
 	struct iova *iova;
 
-	iova = alloc_iova_mem();
-	if (!iova)
-		return iova;
+	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+	if (iova)
+		iova_insert_rbtree(&iovad->rbroot, iova);
 
-	iova->pfn_hi = pfn_hi;
-	iova->pfn_lo = pfn_lo;
-	iova_insert_rbtree(&iovad->rbroot, iova);
 	return iova;
 }
 
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+	unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	unsigned long flags;
+	struct iova *prev = NULL, *next = NULL;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (iova->pfn_lo < pfn_lo) {
+		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+		if (prev == NULL)
+			goto error;
+	}
+	if (iova->pfn_hi > pfn_hi) {
+		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+		if (next == NULL)
+			goto error;
+	}
+
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+
+	if (prev) {
+		iova_insert_rbtree(&iovad->rbroot, prev);
+		iova->pfn_lo = pfn_lo;
+	}
+	if (next) {
+		iova_insert_rbtree(&iovad->rbroot, next);
+		iova->pfn_hi = pfn_hi;
+	}
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	return iova;
+
+error:
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	if (prev)
+		free_iova_mem(prev);
+	return NULL;
+}
```
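Usage note: `split_and_remove_iova()` detaches the node covering `[pfn_lo, pfn_hi]` from the rbtree, re-inserts any leftover head/tail pieces as new nodes, and returns the carved-out middle range to the caller. A minimal caller sketch, assuming a reserved range is being retired; `trim_reserved_range()` is an invented wrapper for illustration, while `find_iova()` and `free_iova_mem()` are the existing iova helpers. A real caller would also unmap the pages covered by the returned iova before freeing it, and loop if the range spans several nodes.

```c
#include <linux/iova.h>

/* Illustrative only: carve [start_pfn, last_pfn] out of the reserved
 * ranges tracked by an iova_domain, e.g. when that memory is removed. */
static void trim_reserved_range(struct iova_domain *iovad,
				unsigned long start_pfn, unsigned long last_pfn)
{
	struct iova *iova;

	iova = find_iova(iovad, start_pfn);
	if (!iova)
		return;	/* nothing reserved in this range */

	/* Neighbouring leftovers are split off and re-inserted for us. */
	iova = split_and_remove_iova(iovad, iova, start_pfn, last_pfn);
	if (iova)
		free_iova_mem(iova);	/* the detached middle piece */
}
```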