Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c        | 30
-rw-r--r--  drivers/iommu/amd_iommu_init.c   | 34
-rw-r--r--  drivers/iommu/amd_iommu_proto.h  | 10
-rw-r--r--  drivers/iommu/amd_iommu_types.h  |  2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c     |  8
-rw-r--r--  drivers/iommu/intel-iommu.c      |  7
-rw-r--r--  drivers/iommu/intel-svm.c        |  9
7 files changed, 62 insertions, 38 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 538c16f669f9..51f8215877f5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -515,7 +515,7 @@ static void dump_dte_entry(u16 devid)
 
 static void dump_command(unsigned long phys_addr)
 {
-	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
 	int i;
 
 	for (i = 0; i < 4; ++i)
@@ -859,11 +859,13 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 
 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 {
+	u64 paddr = iommu_virt_to_phys((void *)address);
+
 	WARN_ON(address & 0x7ULL);
 
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
-	cmd->data[1] = upper_32_bits(__pa(address));
+	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+	cmd->data[1] = upper_32_bits(paddr);
 	cmd->data[2] = 1;
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
@@ -1323,7 +1325,7 @@ static bool increase_address_space(struct protection_domain *domain,
 		return false;
 
 	*pte = PM_LEVEL_PDE(domain->mode,
-			    virt_to_phys(domain->pt_root));
+			    iommu_virt_to_phys(domain->pt_root));
 	domain->pt_root  = pte;
 	domain->mode    += 1;
 	domain->updated  = true;
@@ -1360,7 +1362,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 			if (!page)
 				return NULL;
 
-			__npte = PM_LEVEL_PDE(level, virt_to_phys(page));
+			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
 
 			/* pte could have been changed somewhere. */
 			if (cmpxchg64(pte, __pte, __npte) != __pte) {
@@ -1476,10 +1478,10 @@ static int iommu_map_page(struct protection_domain *dom,
 		return -EBUSY;
 
 	if (count > 1) {
-		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
+		__pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
 		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 	} else
-		__pte = phys_addr | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		__pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
 	if (prot & IOMMU_PROT_IR)
 		__pte |= IOMMU_PTE_IR;
@@ -1695,7 +1697,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
 		if (!(tbl[i] & GCR3_VALID))
 			continue;
 
-		ptr = __va(tbl[i] & PAGE_MASK);
+		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
 		free_page((unsigned long)ptr);
 	}
@@ -1710,7 +1712,7 @@ static void free_gcr3_tbl_level2(u64 *tbl)
 		if (!(tbl[i] & GCR3_VALID))
 			continue;
 
-		ptr = __va(tbl[i] & PAGE_MASK);
+		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
 		free_gcr3_tbl_level1(ptr);
 	}
@@ -1820,7 +1822,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 	u64 flags = 0;
 
 	if (domain->mode != PAGE_MODE_NONE)
-		pte_root = virt_to_phys(domain->pt_root);
+		pte_root = iommu_virt_to_phys(domain->pt_root);
 
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
@@ -1832,7 +1834,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
 		flags |= DTE_FLAG_IOTLB;
 
 	if (domain->flags & PD_IOMMUV2_MASK) {
-		u64 gcr3 = __pa(domain->gcr3_tbl);
+		u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
 		u64 glx  = domain->glx;
 		u64 tmp;
 
@@ -3396,10 +3398,10 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
 			if (root == NULL)
 				return NULL;
 
-			*pte = __pa(root) | GCR3_VALID;
+			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
 		}
 
-		root = __va(*pte & PAGE_MASK);
+		root = iommu_phys_to_virt(*pte & PAGE_MASK);
 
 		level -= 1;
 	}
@@ -3573,7 +3575,7 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 
 	dte	= amd_iommu_dev_table[devid].data[2];
 	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
-	dte	|= virt_to_phys(table->table);
+	dte	|= iommu_virt_to_phys(table->table);
 	dte	|= DTE_IRQ_REMAP_INTCTL;
 	dte	|= DTE_IRQ_TABLE_LEN;
 	dte	|= DTE_IRQ_REMAP_ENABLE;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index ff8887ac5555..382de42b8359 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -29,6 +29,7 @@
 #include <linux/export.h>
 #include <linux/iommu.h>
 #include <linux/kmemleak.h>
+#include <linux/mem_encrypt.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -376,7 +377,7 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
 
 	BUG_ON(iommu->mmio_base == NULL);
 
-	entry = virt_to_phys(amd_iommu_dev_table);
+	entry = iommu_virt_to_phys(amd_iommu_dev_table);
 	entry |= (dev_table_size >> 12) - 1;
 	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
 			&entry, sizeof(entry));
@@ -634,7 +635,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 
 	BUG_ON(iommu->cmd_buf == NULL);
 
-	entry = (u64)virt_to_phys(iommu->cmd_buf);
+	entry = iommu_virt_to_phys(iommu->cmd_buf);
 	entry |= MMIO_CMD_SIZE_512;
 
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
@@ -671,7 +672,7 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
 
 	BUG_ON(iommu->evt_buf == NULL);
 
-	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
 
 	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
 		    &entry, sizeof(entry));
@@ -712,7 +713,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 	if (iommu->ppr_log == NULL)
 		return;
 
-	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
 
 	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
 		    &entry, sizeof(entry));
@@ -792,10 +793,10 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	if (!iommu->ga_log_tail)
 		goto err_out;
 
-	entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
 		    &entry, sizeof(entry));
-	entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+	entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
 		    &entry, sizeof(entry));
 	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
@@ -2739,6 +2740,24 @@ static int __init amd_iommu_init(void)
 	return ret;
 }
 
+static bool amd_iommu_sme_check(void)
+{
+	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+		return true;
+
+	/* For Fam17h, a specific level of support is required */
+	if (boot_cpu_data.microcode >= 0x08001205)
+		return true;
+
+	if ((boot_cpu_data.microcode >= 0x08001126) &&
+	    (boot_cpu_data.microcode <= 0x080011ff))
+		return true;
+
+	pr_notice("AMD-Vi: IOMMU not currently supported when SME is active\n");
+
+	return false;
+}
+
 /****************************************************************************
  *
  * Early detect code. This code runs at IOMMU detection time in the DMA
@@ -2753,6 +2772,9 @@ int __init amd_iommu_detect(void)
 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
 		return -ENODEV;
 
+	if (!amd_iommu_sme_check())
+		return -ENODEV;
+
 	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
 	if (ret)
 		return ret;
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 90e62e9b01c5..640c286a0ab9 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -87,6 +87,16 @@ static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
 	return !!(iommu->features & f);
 }
 
+static inline u64 iommu_virt_to_phys(void *vaddr)
+{
+	return (u64)__sme_set(virt_to_phys(vaddr));
+}
+
+static inline void *iommu_phys_to_virt(unsigned long paddr)
+{
+	return phys_to_virt(__sme_clr(paddr));
+}
+
 extern bool translation_pre_enabled(struct amd_iommu *iommu);
 extern struct iommu_dev_data *get_dev_data(struct device *dev);
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 5f775fef341c..f6b24c7d8b70 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -362,7 +362,7 @@
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
-#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
+#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
 #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
 
 #define IOMMU_PROT_MASK 0x03
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index e705fac89cb4..7d94e1d39e5e 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
 	return 0;
 }
 
-static void mn_invalidate_page(struct mmu_notifier *mn,
-			       struct mm_struct *mm,
-			       unsigned long address)
-{
-	__mn_flush_page(mn, address);
-}
-
 static void mn_invalidate_range(struct mmu_notifier *mn,
 				struct mm_struct *mm,
 				unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 static const struct mmu_notifier_ops iommu_mn = {
 	.release		= mn_release,
 	.clear_flush_young	= mn_clear_flush_young,
-	.invalidate_page	= mn_invalidate_page,
 	.invalidate_range	= mn_invalidate_range,
 };
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2be8e23448ee..6784a05dd6b2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -876,6 +876,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 		struct pci_dev *pf_pdev;
 
 		pdev = to_pci_dev(dev);
+
+#ifdef CONFIG_X86
+		/* VMD child devices currently cannot be handled individually */
+		if (is_vmd(pdev->bus))
+			return NULL;
+#endif
+
 		/* VFs aren't listed in scope tables; we need to look up
 		 * the PF instead to find the IOMMU. */
 		pf_pdev = pci_physfn(pdev);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 0c9f0773601d..f6697e55c2d4 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -224,14 +224,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
 	intel_flush_svm_range(svm, address, 1, 1, 0);
 }
 
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-				  unsigned long address)
-{
-	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
-	intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
 /* Pages have been freed at this point */
 static void intel_invalidate_range(struct mmu_notifier *mn,
 				   struct mm_struct *mm,
@@ -286,7 +278,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 static const struct mmu_notifier_ops intel_mmuops = {
 	.release = intel_mm_release,
 	.change_pte = intel_change_pte,
-	.invalidate_page = intel_invalidate_page,
 	.invalidate_range = intel_invalidate_range,
 };
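A note on the SME helpers used throughout the AMD changes: __sme_set() ORs the memory-encryption mask (sme_me_mask, the "C-bit") into a physical address and __sme_clr() strips it again, so every address the IOMMU hardware dereferences (device table, command/event/PPR/GA buffers, page tables, GCR3 tables) carries the encryption bit when SME is active, while CPU-side lookups remove the bit before phys_to_virt(). The standalone C sketch below only illustrates that mask arithmetic; DEMO_SME_MASK and the demo_* functions are made-up names standing in for the real sme_me_mask and the iommu_virt_to_phys()/iommu_phys_to_virt() helpers added in amd_iommu_proto.h above.

/*
 * Illustration only: models the __sme_set()/__sme_clr() arithmetic behind
 * iommu_virt_to_phys()/iommu_phys_to_virt().  DEMO_SME_MASK is a made-up
 * value; the real mask (sme_me_mask) is determined at boot and is zero
 * when SME is not active.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SME_MASK (1ULL << 47)	/* hypothetical C-bit position */

static uint64_t demo_sme_set(uint64_t paddr)
{
	return paddr | DEMO_SME_MASK;	/* address as the device must see it */
}

static uint64_t demo_sme_clr(uint64_t paddr)
{
	return paddr & ~DEMO_SME_MASK;	/* address as the CPU mapping expects */
}

int main(void)
{
	uint64_t paddr = 0x12345000ULL;		/* pretend virt_to_phys() result */
	uint64_t dev_addr = demo_sme_set(paddr);

	printf("programmed into hardware: 0x%llx\n",
	       (unsigned long long)dev_addr);
	printf("back for the CPU:         0x%llx\n",
	       (unsigned long long)demo_sme_clr(dev_addr));
	return 0;
}

Centralizing the mask handling in the two inline helpers is what keeps the rest of the patch to mechanical one-line substitutions of virt_to_phys()/__pa() and phys_to_virt()/__va() at each call site.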