author | David Woodhouse <David.Woodhouse@intel.com> | 2015-10-24 21:06:39 +0200 |
---|---|---|
committer | David Woodhouse <David.Woodhouse@intel.com> | 2015-10-24 21:06:39 +0200 |
commit | 5a10ba27d963bc79d6ac2e4996cdbb012195c306 (patch) | |
tree | a7d84b59269fb4b9497533080a5c9f3adda8a2f8 /drivers/iommu | |
parent | iommu/vt-d: Fix SVM IOTLB flush handling (diff) | |
iommu/vt-d: Handle Caching Mode implementations of SVM
Not entirely clear why, but it seems we need to reserve PASID zero and
flush it when we make a PASID entry present.
Quite why we couldn't use the true PASID value isn't clear.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
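As a rough illustration of the allocation rule described above, here is a minimal user-space sketch (not kernel code): on a Caching Mode, i.e. virtualised, IOMMU the first PASID handed out becomes 1 rather than 0, mirroring the `!!cap_caching_mode(iommu->cap)` start index the patch passes to `idr_alloc()`. The `CAP_CM_BIT` position and the local `cap_caching_mode()` helper below are stand-ins assumed for illustration only.

```c
/*
 * Sketch of the PASID allocation rule this patch adds: when the IOMMU
 * reports Caching Mode (a virtualised IOMMU), PASID 0 is reserved and
 * allocation starts at 1.  CAP_CM_BIT is an assumed bit position used
 * here purely for illustration; the kernel uses cap_caching_mode().
 */
#include <stdio.h>
#include <stdint.h>

#define CAP_CM_BIT 7	/* assumed bit position standing in for cap_caching_mode() */

static int cap_caching_mode(uint64_t cap)
{
	return (int)((cap >> CAP_CM_BIT) & 1);
}

/* Mirrors the '!!cap_caching_mode(iommu->cap)' start index passed to idr_alloc() */
static int first_allocatable_pasid(uint64_t cap)
{
	return !!cap_caching_mode(cap);
}

int main(void)
{
	uint64_t bare_metal  = 0;
	uint64_t virtualised = 1ULL << CAP_CM_BIT;

	printf("bare metal:   PASIDs allocated from %d\n", first_allocatable_pasid(bare_metal));
	printf("caching mode: PASIDs allocated from %d\n", first_allocatable_pasid(virtualised));
	return 0;
}
```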
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/intel-svm.c | 23 |
1 file changed, 18 insertions, 5 deletions
```diff
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index a584df0767e6..48a3d4a3d6c0 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -236,12 +236,12 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
 }
 
-static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev)
+static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid)
 {
         struct qi_desc desc;
 
         desc.high = 0;
-        desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(svm->pasid);
+        desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
 
         qi_submit_sync(&desc, svm->iommu);
 }
 
@@ -356,8 +356,10 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
         if (pasid_max > 2 << ecap_pss(iommu->ecap))
                 pasid_max = 2 << ecap_pss(iommu->ecap);
 
-        ret = idr_alloc(&iommu->pasid_idr, svm, 0, pasid_max - 1,
-                        GFP_KERNEL);
+        /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
+        ret = idr_alloc(&iommu->pasid_idr, svm,
+                        !!cap_caching_mode(iommu->cap),
+                        pasid_max - 1, GFP_KERNEL);
         if (ret < 0) {
                 kfree(svm);
                 goto out;
@@ -381,6 +383,17 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                 } else
                         iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
 
                 wmb();
+                /* In caching mode, we still have to flush with PASID 0 when
+                 * a PASID table entry becomes present. Not entirely clear
+                 * *why* that would be the case -- surely we could just issue
+                 * a flush with the PASID value that we've changed? The PASID
+                 * is the index into the table, after all. It's not like domain
+                 * IDs in the case of the equivalent context-entry change in
+                 * caching mode. And for that matter it's not entirely clear why
+                 * a VMM would be in the business of caching the PASID table
+                 * anyway. Surely that can be left entirely to the guest? */
+                if (cap_caching_mode(iommu->cap))
+                        intel_flush_pasid_dev(svm, sdev, 0);
         }
         list_add_rcu(&sdev->list, &svm->devs);
@@ -424,7 +437,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                          * to use. We have a *shared* PASID table, because it's
                          * large and has to be physically contiguous. So it's
                          * hard to be as defensive as we might like. */
-                        intel_flush_pasid_dev(svm, sdev);
+                        intel_flush_pasid_dev(svm, sdev, svm->pasid);
                         intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
                         kfree_rcu(sdev, rcu);
 
```
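For reference, here is a stand-alone sketch of the PASID-cache invalidation descriptor that the reworked `intel_flush_pasid_dev()` submits via `qi_submit_sync()`. The `QI_PC_*` values below are assumptions chosen only to show the shape of the 128-bit descriptor; the authoritative definitions live in the kernel's intel-iommu headers.

```c
/*
 * Sketch of the descriptor built by intel_flush_pasid_dev().  The type,
 * granularity and field offsets are assumed for illustration; only the
 * overall structure (low/high u64 pair, DID and PASID fields) mirrors
 * the code in the diff above.
 */
#include <stdio.h>
#include <stdint.h>

#define QI_PC_TYPE       0x7ULL			/* assumed: PASID-cache invalidate type */
#define QI_PC_PASID_SEL  (1ULL << 4)		/* assumed: PASID-selective granularity */
#define QI_PC_DID(did)   ((uint64_t)(did) << 16)	/* assumed: domain ID field */
#define QI_PC_PASID(p)   ((uint64_t)(p) << 32)	/* assumed: PASID field */

struct qi_desc {
	uint64_t low;
	uint64_t high;
};

/* Build the descriptor for one (domain, pasid) pair, as the patched helper does */
static struct qi_desc build_pc_inv(uint16_t did, int pasid)
{
	struct qi_desc desc;

	desc.high = 0;
	desc.low = QI_PC_TYPE | QI_PC_DID(did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
	return desc;
}

int main(void)
{
	/* Caching-mode flush after making a PASID table entry present: PASID 0 */
	struct qi_desc cm_flush = build_pc_inv(1, 0);
	/* Tear-down flush for the PASID actually being freed */
	struct qi_desc unbind_flush = build_pc_inv(1, 42);

	printf("caching-mode flush: low=%#llx\n", (unsigned long long)cm_flush.low);
	printf("unbind flush:       low=%#llx\n", (unsigned long long)unbind_flush.low);
	return 0;
}
```

With the extra `pasid` argument, the bind path can issue the caching-mode flush with PASID 0 while the unbind path keeps flushing the PASID that is actually being torn down.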