Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd/Kconfig                    |   2
-rw-r--r--  drivers/iommu/amd/amd_iommu.h                |  10
-rw-r--r--  drivers/iommu/amd/init.c                     |  79
-rw-r--r--  drivers/iommu/amd/iommu.c                    | 182
-rw-r--r--  drivers/iommu/amd/iommu_v2.c                 |  27
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c  |  11
-rw-r--r--  drivers/iommu/exynos-iommu.c                 |   8
-rw-r--r--  drivers/iommu/hyperv-iommu.c                 |   8
-rw-r--r--  drivers/iommu/intel/dmar.c                   |  10
-rw-r--r--  drivers/iommu/intel/iommu.c                  | 123
-rw-r--r--  drivers/iommu/intel/irq_remapping.c          | 129
-rw-r--r--  drivers/iommu/intel/pasid.c                  |  31
-rw-r--r--  drivers/iommu/intel/pasid.h                  |  24
-rw-r--r--  drivers/iommu/intel/svm.c                    |  47
-rw-r--r--  drivers/iommu/iommu.c                        |   2
-rw-r--r--  drivers/iommu/irq_remapping.c                |  23
-rw-r--r--  drivers/iommu/irq_remapping.h                |   5
-rw-r--r--  drivers/iommu/virtio-iommu.c                 |   2
18 files changed, 372 insertions(+), 351 deletions(-)
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 1f061d91e0b8..626b97d0dd21 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,7 +10,7 @@ config AMD_IOMMU
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
- depends on X86_64 && PCI && ACPI
+ depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
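The new HAVE_CMPXCHG_DOUBLE dependency is there because GA (128-bit IRTE) interrupt remapping, enabled further down in this series, rewrites remap table entries with cmpxchg16b. A minimal sketch of that update pattern, mirroring the modify_irte_ga() hunk in amd/iommu.c below (struct and function names here are illustrative, not the driver's):

#include <linux/atomic.h>
#include <linux/types.h>

/* Two adjacent 64-bit words; cmpxchg16b needs 16-byte alignment. */
struct irte128 {
	u64 lo;
	u64 hi;
} __aligned(16);

static bool irte128_update(struct irte128 *e, u64 new_lo, u64 new_hi)
{
	/*
	 * Replace both halves in one shot. Returns false if the entry
	 * changed underneath us, which the driver treats as a bug.
	 */
	return cmpxchg_double(&e->lo, &e->hi, e->lo, e->hi, new_lo, new_hi);
}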
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 57309716fd18..030ee90197a1 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -45,12 +45,12 @@ extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
-extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
u64 address);
-extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
-extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
-extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
#ifdef CONFIG_IRQ_REMAP
@@ -66,7 +66,7 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
#define PPR_INVALID 0x1
#define PPR_FAILURE 0xf
-extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag);
static inline bool is_rd890_iommu(struct pci_dev *pdev)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 958050c213f9..1ba6b4cc56e8 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1104,25 +1104,6 @@ static int __init add_early_maps(void)
}
/*
- * Reads the device exclusion range from ACPI and initializes the IOMMU with
- * it
- */
-static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
-{
- if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
- return;
-
- /*
- * Treat per-device exclusion ranges as r/w unity-mapped regions
- * since some buggy BIOSes might lead to the overwritten exclusion
- * range (exclusion_start and exclusion_length members). This
- * happens when there are multiple exclusion ranges (IVMD entries)
- * defined in ACPI table.
- */
- m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
-}
-
-/*
* Takes a pointer to an AMD IOMMU entry in the ACPI table and
* initializes the hardware and our data structures with it.
*/
@@ -1511,7 +1492,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+
+ /*
+ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+ * GAM also requires GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling it.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
@@ -1520,8 +1508,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+
+ /*
+ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+ * XT and GAM also require GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling them.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ }
+
/*
* Note: Since iommu_update_intcapxt() leverages
* the IOMMU MMIO access to MSI capability block registers
@@ -2056,30 +2054,6 @@ static void __init free_unity_maps(void)
}
}
-/* called when we find an exclusion range definition in ACPI */
-static int __init init_exclusion_range(struct ivmd_header *m)
-{
- int i;
-
- switch (m->type) {
- case ACPI_IVMD_TYPE:
- set_device_exclusion_range(m->devid, m);
- break;
- case ACPI_IVMD_TYPE_ALL:
- for (i = 0; i <= amd_iommu_last_bdf; ++i)
- set_device_exclusion_range(i, m);
- break;
- case ACPI_IVMD_TYPE_RANGE:
- for (i = m->devid; i <= m->aux; ++i)
- set_device_exclusion_range(i, m);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
@@ -2090,9 +2064,6 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (e == NULL)
return -ENOMEM;
- if (m->flags & IVMD_FLAG_EXCL_RANGE)
- init_exclusion_range(m);
-
switch (m->type) {
default:
kfree(e);
@@ -2116,6 +2087,16 @@ static int __init init_unity_map_range(struct ivmd_header *m)
e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
e->prot = m->flags >> 1;
+ /*
+ * Treat per-device exclusion ranges as r/w unity-mapped regions
+ * since some buggy BIOSes might lead to the overwritten exclusion
+ * range (exclusion_start and exclusion_length members). This
+ * happens when there are multiple exclusion ranges (IVMD entries)
+ * defined in ACPI table.
+ */
+ if (m->flags & IVMD_FLAG_EXCL_RANGE)
+ e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
+
DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
" range_start: %016llx range_end: %016llx flags: %x\n", s,
PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
@@ -2258,7 +2239,7 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
iommu_feature_enable(iommu, CONTROL_GAM_EN);
- /* Fall through */
+ fallthrough;
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
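The init.c hunks above drop the separate exclusion-range handling and instead fold IVMD exclusion entries into the unity-map list as read/write mappings. A small sketch of that flag translation, assuming the IVMD flag values from the AMD IOMMU driver headers (the helper itself is hypothetical):

#include <linux/types.h>

#define IVMD_FLAG_EXCL_RANGE	0x08
#define IVMD_FLAG_IW		0x04
#define IVMD_FLAG_IR		0x02

/* Unity-map protection bits are the IVMD IR/IW bits shifted down by one. */
static u32 ivmd_to_unity_prot(u32 ivmd_flags)
{
	if (ivmd_flags & IVMD_FLAG_EXCL_RANGE)
		return (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;	/* force r/w */

	return ivmd_flags >> 1;
}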
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ba9f3dbc5b94..9e231caa5012 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -513,10 +513,11 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
struct device *dev = iommu->iommu.dev;
- int type, devid, pasid, flags, tag;
+ int type, devid, flags, tag;
volatile u32 *event = __evt;
int count = 0;
u64 address;
+ u32 pasid;
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
@@ -729,7 +730,21 @@ static void iommu_poll_ga_log(struct amd_iommu *iommu)
}
}
}
-#endif /* CONFIG_IRQ_REMAP */
+
+static void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
+{
+ if (!irq_remapping_enabled || !dev_is_pci(dev) ||
+ pci_dev_has_special_msi_domain(to_pci_dev(dev)))
+ return;
+
+ dev_set_msi_domain(dev, iommu->msi_domain);
+}
+
+#else /* CONFIG_IRQ_REMAP */
+static inline void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
+#endif /* !CONFIG_IRQ_REMAP */
#define AMD_IOMMU_INT_MASK \
(MMIO_STATUS_EVT_INT_MASK | \
@@ -909,7 +924,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
u64 address, bool size)
{
memset(cmd, 0, sizeof(*cmd));
@@ -927,7 +942,7 @@ static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}
-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
int qdep, u64 address, bool size)
{
memset(cmd, 0, sizeof(*cmd));
@@ -947,7 +962,7 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
-static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
int status, int tag, bool gn)
{
memset(cmd, 0, sizeof(*cmd));
@@ -2157,6 +2172,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
iommu_dev = ERR_PTR(ret);
iommu_ignore_device(dev);
} else {
+ amd_iommu_set_pci_msi_domain(dev, iommu);
iommu_dev = &iommu->iommu;
}
@@ -2659,7 +2675,12 @@ static int amd_iommu_def_domain_type(struct device *dev)
if (!dev_data)
return 0;
- if (dev_data->iommu_v2)
+ /*
+ * Do not identity map IOMMUv2 capable devices when memory encryption is
+ * active, because some of those devices (AMD GPUs) don't have the
+ * encryption bit in their DMA-mask and require remapping.
+ */
+ if (!mem_encrypt_active() && dev_data->iommu_v2)
return IOMMU_DOMAIN_IDENTITY;
return 0;
@@ -2781,7 +2802,7 @@ out:
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
-static int __flush_pasid(struct protection_domain *domain, int pasid,
+static int __flush_pasid(struct protection_domain *domain, u32 pasid,
u64 address, bool size)
{
struct iommu_dev_data *dev_data;
@@ -2842,13 +2863,13 @@ out:
return ret;
}
-static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
u64 address)
{
return __flush_pasid(domain, pasid, address, false);
}
-int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
u64 address)
{
struct protection_domain *domain = to_pdomain(dom);
@@ -2863,13 +2884,13 @@ int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
}
EXPORT_SYMBOL(amd_iommu_flush_page);
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
true);
}
-int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
{
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
@@ -2883,7 +2904,7 @@ int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
}
EXPORT_SYMBOL(amd_iommu_flush_tlb);
-static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
{
int index;
u64 *pte;
@@ -2915,7 +2936,7 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
return pte;
}
-static int __set_gcr3(struct protection_domain *domain, int pasid,
+static int __set_gcr3(struct protection_domain *domain, u32 pasid,
unsigned long cr3)
{
struct domain_pgtable pgtable;
@@ -2934,7 +2955,7 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
return __amd_iommu_flush_tlb(domain, pasid);
}
-static int __clear_gcr3(struct protection_domain *domain, int pasid)
+static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
{
struct domain_pgtable pgtable;
u64 *pte;
@@ -2952,7 +2973,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid)
return __amd_iommu_flush_tlb(domain, pasid);
}
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3)
{
struct protection_domain *domain = to_pdomain(dom);
@@ -2967,7 +2988,7 @@ int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
}
EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
{
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
@@ -2981,7 +3002,7 @@ int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
}
EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
-int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag)
{
struct iommu_dev_data *dev_data;
@@ -3292,6 +3313,7 @@ out:
static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
struct amd_ir_data *data)
{
+ bool ret;
struct irq_remap_table *table;
struct amd_iommu *iommu;
unsigned long flags;
@@ -3309,10 +3331,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
entry = (struct irte_ga *)table->table;
entry = &entry[index];
- entry->lo.fields_remap.valid = 0;
- entry->hi.val = irte->hi.val;
- entry->lo.val = irte->lo.val;
- entry->lo.fields_remap.valid = 1;
+
+ ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
+ entry->lo.val, entry->hi.val,
+ irte->lo.val, irte->hi.val);
+ /*
+ * We use cmpxchg16 to atomically update the 128-bit IRTE,
+ * and it cannot be updated by the hardware or other processors
+ * behind us, so the return value of cmpxchg16 should be the
+ * same as the old value.
+ */
+ WARN_ON(!ret);
+
if (data)
data->ref = entry;
@@ -3505,69 +3535,51 @@ static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
static int get_devid(struct irq_alloc_info *info)
{
- int devid = -1;
-
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
- devid = get_ioapic_devid(info->ioapic_id);
- break;
+ case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+ return get_ioapic_devid(info->devid);
case X86_IRQ_ALLOC_TYPE_HPET:
- devid = get_hpet_devid(info->hpet_id);
- break;
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- devid = get_device_id(&info->msi_dev->dev);
- break;
+ case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+ return get_hpet_devid(info->devid);
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
+ return get_device_id(msi_desc_to_dev(info->desc));
default:
- BUG_ON(1);
- break;
+ WARN_ON_ONCE(1);
+ return -1;
}
-
- return devid;
}
-static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
+static struct irq_domain *get_irq_domain_for_devid(struct irq_alloc_info *info,
+ int devid)
{
- struct amd_iommu *iommu;
- int devid;
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- if (!info)
+ if (!iommu)
return NULL;
- devid = get_devid(info);
- if (devid >= 0) {
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu)
- return iommu->ir_domain;
+ switch (info->type) {
+ case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+ case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+ return iommu->ir_domain;
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
}
-
- return NULL;
}
static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
{
- struct amd_iommu *iommu;
int devid;
if (!info)
return NULL;
- switch (info->type) {
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- devid = get_device_id(&info->msi_dev->dev);
- if (devid < 0)
- return NULL;
-
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu)
- return iommu->msi_domain;
- break;
- default:
- break;
- }
-
- return NULL;
+ devid = get_devid(info);
+ if (devid < 0)
+ return NULL;
+ return get_irq_domain_for_devid(info, devid);
}
struct irq_remap_ops amd_iommu_irq_ops = {
@@ -3576,7 +3588,6 @@ struct irq_remap_ops amd_iommu_irq_ops = {
.disable = amd_iommu_disable,
.reenable = amd_iommu_reenable,
.enable_faulting = amd_iommu_enable_faulting,
- .get_ir_irq_domain = get_ir_irq_domain,
.get_irq_domain = get_irq_domain,
};
@@ -3602,21 +3613,21 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
/* Setup IOAPIC entry */
- entry = info->ioapic_entry;
- info->ioapic_entry = NULL;
+ entry = info->ioapic.entry;
+ info->ioapic.entry = NULL;
memset(entry, 0, sizeof(*entry));
entry->vector = index;
entry->mask = 0;
- entry->trigger = info->ioapic_trigger;
- entry->polarity = info->ioapic_polarity;
+ entry->trigger = info->ioapic.trigger;
+ entry->polarity = info->ioapic.polarity;
/* Mask level triggered irqs. */
- if (info->ioapic_trigger)
+ if (info->ioapic.trigger)
entry->mask = 1;
break;
case X86_IRQ_ALLOC_TYPE_HPET:
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo = MSI_ADDR_BASE_LO;
msg->data = irte_info->index;
@@ -3660,15 +3671,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (!info)
return -EINVAL;
- if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
- info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+ if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
+ info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
return -EINVAL;
/*
* With IRQ remapping enabled, don't need contiguous CPU vectors
* to support multiple MSI interrupts.
*/
- if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
devid = get_devid(info);
@@ -3696,15 +3707,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
iommu->irte_ops->set_allocated(table, i);
}
WARN_ON(table->min_index != 32);
- index = info->ioapic_pin;
+ index = info->ioapic.pin;
} else {
index = -ENOMEM;
}
- } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
- info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
- bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+ } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
+ info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
+ bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
- index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+ index = alloc_irq_index(devid, nr_irqs, align,
+ msi_desc_to_pci_dev(info->desc));
} else {
index = alloc_irq_index(devid, nr_irqs, false, NULL);
}
@@ -3717,8 +3729,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(domain, virq + i);
- cfg = irqd_cfg(irq_data);
- if (!irq_data || !cfg) {
+ cfg = irq_data ? irqd_cfg(irq_data) : NULL;
+ if (!cfg) {
ret = -EINVAL;
goto out_free_data;
}
@@ -3826,14 +3838,18 @@ int amd_iommu_activate_guest_mode(void *data)
{
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ u64 valid;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!entry || entry->lo.fields_vapic.guest_mode)
return 0;
+ valid = entry->lo.fields_vapic.valid;
+
entry->lo.val = 0;
entry->hi.val = 0;
+ entry->lo.fields_vapic.valid = valid;
entry->lo.fields_vapic.guest_mode = 1;
entry->lo.fields_vapic.ga_log_intr = 1;
entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
@@ -3850,14 +3866,18 @@ int amd_iommu_deactivate_guest_mode(void *data)
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
struct irq_cfg *cfg = ir_data->cfg;
+ u64 valid;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!entry || !entry->lo.fields_vapic.guest_mode)
return 0;
+ valid = entry->lo.fields_remap.valid;
+
entry->lo.val = 0;
entry->hi.val = 0;
+ entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->irq_dest_mode;
entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
entry->hi.fields.vector = cfg->vector;
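Besides the PASID and IRTE changes, the amd/iommu.c hunks start assigning the MSI parent irqdomain per device at probe time via amd_iommu_set_pci_msi_domain(), instead of answering MSI lookups through get_irq_domain(). A sketch of the idea under the same assumptions as the patch (pci_dev_has_special_msi_domain() guards devices with their own MSI domain, e.g. behind VMD; the wrapper name is made up):

#include <linux/device.h>
#include <linux/msi.h>
#include <linux/pci.h>

static void assign_msi_parent(struct device *dev, struct irq_domain *msi_dom)
{
	if (!dev_is_pci(dev))
		return;

	/* Leave devices that already carry a special MSI domain alone. */
	if (pci_dev_has_special_msi_domain(to_pci_dev(dev)))
		return;

	dev_set_msi_domain(dev, msi_dom);
}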
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index c259108ab6dd..5ecc0bc608ec 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -40,7 +40,7 @@ struct pasid_state {
struct mmu_notifier mn; /* mmu_notifier handle */
struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
struct device_state *device_state; /* Link to our device_state */
- int pasid; /* PASID index */
+ u32 pasid; /* PASID index */
bool invalid; /* Used during setup and
teardown of the pasid */
spinlock_t lock; /* Protect pri_queues and
@@ -70,7 +70,7 @@ struct fault {
struct mm_struct *mm;
u64 address;
u16 devid;
- u16 pasid;
+ u32 pasid;
u16 tag;
u16 finish;
u16 flags;
@@ -150,7 +150,7 @@ static void put_device_state(struct device_state *dev_state)
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
- int pasid, bool alloc)
+ u32 pasid, bool alloc)
{
struct pasid_state **root, **ptr;
int level, index;
@@ -184,7 +184,7 @@ static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state
static int set_pasid_state(struct device_state *dev_state,
struct pasid_state *pasid_state,
- int pasid)
+ u32 pasid)
{
struct pasid_state **ptr;
unsigned long flags;
@@ -211,7 +211,7 @@ out_unlock:
return ret;
}
-static void clear_pasid_state(struct device_state *dev_state, int pasid)
+static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
{
struct pasid_state **ptr;
unsigned long flags;
@@ -229,7 +229,7 @@ out_unlock:
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
- int pasid)
+ u32 pasid)
{
struct pasid_state **ptr, *ret = NULL;
unsigned long flags;
@@ -594,7 +594,7 @@ static struct notifier_block ppr_nb = {
.notifier_call = ppr_notifier,
};
-int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct task_struct *task)
{
struct pasid_state *pasid_state;
@@ -615,7 +615,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
return -EINVAL;
ret = -EINVAL;
- if (pasid < 0 || pasid >= dev_state->max_pasids)
+ if (pasid >= dev_state->max_pasids)
goto out;
ret = -ENOMEM;
@@ -679,7 +679,7 @@ out:
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
-void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
@@ -695,7 +695,7 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
if (dev_state == NULL)
return;
- if (pasid < 0 || pasid >= dev_state->max_pasids)
+ if (pasid >= dev_state->max_pasids)
goto out;
pasid_state = get_pasid_state(dev_state, pasid);
@@ -737,6 +737,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
might_sleep();
+ /*
+ * When memory encryption is active the device is likely not in a
+ * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
+ */
+ if (mem_encrypt_active())
+ return -ENODEV;
+
if (!amd_iommu_v2_supported())
return -ENODEV;
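The int-to-u32 PASID conversion that runs through this whole series reflects that a PASID is an unsigned 20-bit value, which is why the "pasid < 0" tests above become dead code and are dropped. A trivial illustration of the simplified range check (helper name hypothetical):

#include <linux/types.h>

static bool pasid_in_range(u32 pasid, u32 max_pasids)
{
	/* No lower-bound test needed once the type is unsigned. */
	return pasid < max_pasids;
}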
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 7196207be7ea..c192544e874b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -903,7 +903,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
break;
case CMDQ_OP_CFGI_CD:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
- /* Fallthrough */
+ fallthrough;
case CMDQ_OP_CFGI_STE:
cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
@@ -936,7 +936,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
break;
case CMDQ_OP_TLBI_NH_ASID:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
- /* Fallthrough */
+ fallthrough;
case CMDQ_OP_TLBI_S12_VMALL:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
@@ -1036,7 +1036,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
*/
return;
case CMDQ_ERR_CERROR_ILL_IDX:
- /* Fallthrough */
default:
break;
}
@@ -3758,7 +3757,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
case IDR0_STALL_MODEL_FORCE:
smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
- /* Fallthrough */
+ fallthrough;
case IDR0_STALL_MODEL_STALL:
smmu->features |= ARM_SMMU_FEAT_STALLS;
}
@@ -3778,7 +3777,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
switch (FIELD_GET(IDR0_TTF, reg)) {
case IDR0_TTF_AARCH32_64:
smmu->ias = 40;
- /* Fallthrough */
+ fallthrough;
case IDR0_TTF_AARCH64:
break;
default:
@@ -3875,7 +3874,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
default:
dev_info(smmu->dev,
"unknown output address size. Truncating to 48-bit\n");
- /* Fallthrough */
+ fallthrough;
case IDR5_OAS_48_BIT:
smmu->oas = 48;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index bad3c0ce10cb..de324b4eedfe 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1295,13 +1295,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
return -ENODEV;
data = platform_get_drvdata(sysmmu);
- if (!data)
+ if (!data) {
+ put_device(&sysmmu->dev);
return -ENODEV;
+ }
if (!owner) {
owner = kzalloc(sizeof(*owner), GFP_KERNEL);
- if (!owner)
+ if (!owner) {
+ put_device(&sysmmu->dev);
return -ENOMEM;
+ }
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
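The exynos-iommu.c change fixes a device reference leak: of_xlate() looks up the SYSMMU platform device earlier in the function, so every early return after that point has to drop the reference it took. A hedged sketch of the rule (of_find_device_by_node() is the lookup the driver uses; the wrapper is illustrative):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void *get_sysmmu_drvdata(struct device_node *np)
{
	struct platform_device *sysmmu = of_find_device_by_node(np);
	void *data;

	if (!sysmmu)
		return NULL;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		put_device(&sysmmu->dev);	/* drop the lookup's reference */

	/* On success the caller keeps the reference for the device's lifetime. */
	return data;
}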
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 8919c1c70b68..e09e2d734c57 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -101,7 +101,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
* in the chip_data and hyperv_irq_remapping_activate()/hyperv_ir_set_
* affinity() set vector and dest_apicid directly into IO-APIC entry.
*/
- irq_data->chip_data = info->ioapic_entry;
+ irq_data->chip_data = info->ioapic.entry;
/*
* Hyper-V IO APIC irq affinity should be in the scope of
@@ -182,9 +182,9 @@ static int __init hyperv_enable_irq_remapping(void)
return IRQ_REMAP_X2APIC_MODE;
}
-static struct irq_domain *hyperv_get_ir_irq_domain(struct irq_alloc_info *info)
+static struct irq_domain *hyperv_get_irq_domain(struct irq_alloc_info *info)
{
- if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC)
+ if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT)
return ioapic_ir_domain;
else
return NULL;
@@ -193,7 +193,7 @@ static struct irq_domain *hyperv_get_ir_irq_domain(struct irq_alloc_info *info)
struct irq_remap_ops hyperv_irq_remap_ops = {
.prepare = hyperv_prepare_irq_remapping,
.enable = hyperv_enable_irq_remapping,
- .get_ir_irq_domain = hyperv_get_ir_irq_domain,
+ .get_irq_domain = hyperv_get_irq_domain,
};
#endif
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 93e6345f3414..a8fb82c166eb 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -316,6 +316,9 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
if (ret < 0 && dmar_dev_scope_status == 0)
dmar_dev_scope_status = ret;
+ if (ret >= 0)
+ intel_irq_remap_add_device(info);
+
return ret;
}
@@ -1482,7 +1485,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
}
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
- u64 granu, int pasid)
+ u64 granu, u32 pasid)
{
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
@@ -1796,7 +1799,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
- u8 fault_reason, int pasid, u16 source_id,
+ u8 fault_reason, u32 pasid, u16 source_id,
unsigned long long addr)
{
const char *reason;
@@ -1846,7 +1849,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
u8 fault_reason;
u16 source_id;
u64 guest_addr;
- int type, pasid;
+ u32 pasid;
+ int type;
u32 data;
bool pasid_present;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index e9864e52b0e9..342e42e9c977 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level)
return (level - 1) * LEVEL_STRIDE;
}
-static inline int pfn_level_offset(unsigned long pfn, int level)
+static inline int pfn_level_offset(u64 pfn, int level)
{
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline unsigned long level_mask(int level)
+static inline u64 level_mask(int level)
{
- return -1UL << level_to_offset_bits(level);
+ return -1ULL << level_to_offset_bits(level);
}
-static inline unsigned long level_size(int level)
+static inline u64 level_size(int level)
{
- return 1UL << level_to_offset_bits(level);
+ return 1ULL << level_to_offset_bits(level);
}
-static inline unsigned long align_to_level(unsigned long pfn, int level)
+static inline u64 align_to_level(u64 pfn, int level)
{
return (pfn + level_size(level) - 1) & level_mask(level);
}
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
- return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+ return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -364,7 +364,6 @@ static int iommu_skip_te_disable;
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
struct device_domain_info *get_domain_info(struct device *dev)
{
@@ -374,8 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
return NULL;
info = dev_iommu_priv_get(dev);
- if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
- info == DEFER_DEVICE_DOMAIN_INFO))
+ if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
return NULL;
return info;
@@ -742,11 +740,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
return &context[devfn];
}
-static int iommu_dummy(struct device *dev)
-{
- return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
static bool attach_deferred(struct device *dev)
{
return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
@@ -779,6 +772,53 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
return false;
}
+static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+ struct dmar_drhd_unit *drhd;
+ u32 vtbar;
+ int rc;
+
+ /* We know that this device on this chipset has its own IOMMU.
+ * If we find it under a different IOMMU, then the BIOS is lying
+ * to us. Hope that the IOMMU for this device is actually
+ * disabled, and it needs no translation...
+ */
+ rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+ if (rc) {
+ /* "can't" happen */
+ dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+ return false;
+ }
+ vtbar &= 0xffff0000;
+
+ /* we know that this iommu should be at offset 0xa000 from vtbar */
+ drhd = dmar_find_matched_drhd_unit(pdev);
+ if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+ pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ return true;
+ }
+
+ return false;
+}
+
+static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
+{
+ if (!iommu || iommu->drhd->ignored)
+ return true;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
+ quirk_ioat_snb_local_iommu(pdev))
+ return true;
+ }
+
+ return false;
+}
+
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -788,7 +828,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
u16 segment = 0;
int i;
- if (!dev || iommu_dummy(dev))
+ if (!dev)
return NULL;
if (dev_is_pci(dev)) {
@@ -805,7 +845,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
dev = &ACPI_COMPANION(dev)->dev;
rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
if (pdev && segment != drhd->segment)
continue;
@@ -841,6 +881,9 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
}
iommu = NULL;
out:
+ if (iommu_is_dummy(iommu, dev))
+ iommu = NULL;
+
rcu_read_unlock();
return iommu;
@@ -2447,7 +2490,7 @@ struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
- if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
+ if (unlikely(attach_deferred(dev)))
return NULL;
/* No lock here, assumes no domain exit in normal case */
@@ -2484,7 +2527,7 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
- int pasid)
+ u32 pasid)
{
int flags = PASID_FLAG_SUPERVISOR_MODE;
struct dma_pte *pgd = domain->pgd;
@@ -2621,7 +2664,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
}
/* Setup the PASID entry for requests without PASID: */
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, flags);
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
dev, PASID_RID2PASID);
@@ -2631,7 +2674,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
else
ret = intel_pasid_setup_second_level(iommu, domain,
dev, PASID_RID2PASID);
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
@@ -3989,35 +4032,6 @@ static void __init iommu_exit_mempool(void)
iova_cache_put();
}
-static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
-{
- struct dmar_drhd_unit *drhd;
- u32 vtbar;
- int rc;
-
- /* We know that this device on this chipset has its own IOMMU.
- * If we find it under a different IOMMU, then the BIOS is lying
- * to us. Hope that the IOMMU for this device is actually
- * disabled, and it needs no translation...
- */
- rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
- if (rc) {
- /* "can't" happen */
- dev_info(&pdev->dev, "failed to run vt-d quirk\n");
- return;
- }
- vtbar &= 0xffff0000;
-
- /* we know that the this iommu should be at offset 0xa000 from vtbar */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
- pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
- add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
-
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
@@ -4049,12 +4063,8 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
- if (!dmar_map_gfx) {
+ if (!dmar_map_gfx)
drhd->ignored = 1;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
}
}
@@ -5070,7 +5080,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
switch (type) {
case IOMMU_DOMAIN_DMA:
- /* fallthrough */
case IOMMU_DOMAIN_UNMANAGED:
dmar_domain = alloc_domain(0);
if (!dmar_domain) {
@@ -5164,7 +5173,7 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
return -ENODEV;
if (domain->default_pasid <= 0) {
- int pasid;
+ u32 pasid;
/* No private data needed for the default pasid */
pasid = ioasid_alloc(NULL, PASID_MIN,
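The level_mask()/level_size() hunks near the top of the intel/iommu.c diff move from unsigned long to u64 because a long is 32 bits on a 32-bit build, so the shifts overflow for address widths above 32 bits. A minimal illustration of the difference (function names hypothetical):

#include <linux/types.h>

/* Truncates on 32-bit kernels: the shift happens in 32-bit arithmetic. */
static inline unsigned long level_mask_long(int offset_bits)
{
	return -1UL << offset_bits;
}

/* Correct everywhere: the shift is forced into 64-bit arithmetic. */
static inline u64 level_mask_u64(int offset_bits)
{
	return -1ULL << offset_bits;
}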
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 23583b0e66a5..0cfce1d3b7bb 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -204,35 +204,40 @@ static int modify_irte(struct irq_2_iommu *irq_iommu,
return rc;
}
-static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
+static struct irq_domain *map_hpet_to_ir(u8 hpet_id)
{
int i;
- for (i = 0; i < MAX_HPET_TBS; i++)
+ for (i = 0; i < MAX_HPET_TBS; i++) {
if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
- return ir_hpet[i].iommu;
+ return ir_hpet[i].iommu->ir_domain;
+ }
return NULL;
}
-static struct intel_iommu *map_ioapic_to_ir(int apic)
+static struct intel_iommu *map_ioapic_to_iommu(int apic)
{
int i;
- for (i = 0; i < MAX_IO_APICS; i++)
+ for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
return ir_ioapic[i].iommu;
+ }
return NULL;
}
-static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
+static struct irq_domain *map_ioapic_to_ir(int apic)
{
- struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = map_ioapic_to_iommu(apic);
- drhd = dmar_find_matched_drhd_unit(dev);
- if (!drhd)
- return NULL;
+ return iommu ? iommu->ir_domain : NULL;
+}
- return drhd->iommu;
+static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
+{
+ struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev);
+
+ return drhd ? drhd->iommu->ir_msi_domain : NULL;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
@@ -508,12 +513,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
+ /* Block compatibility-format MSIs */
+ if (sts & DMA_GSTS_CFIS) {
+ iommu->gcmd &= ~DMA_GCMD_CFI;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, !(sts & DMA_GSTS_CFIS), sts);
+ }
+
/*
* With CFI clear in the Global Command register, we should be
* protected from dangerous (i.e. compatibility) interrupts
@@ -996,7 +1007,7 @@ static int __init parse_ioapics_under_ir(void)
for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
int ioapic_id = mpc_ioapic_id(ioapic_idx);
- if (!map_ioapic_to_ir(ioapic_id)) {
+ if (!map_ioapic_to_iommu(ioapic_id)) {
pr_err(FW_BUG "ioapic %d has no mapping iommu, "
"interrupt remapping will be disabled\n",
ioapic_id);
@@ -1081,6 +1092,22 @@ error:
return -1;
}
+/*
+ * Store the MSI remapping domain pointer in the device if enabled.
+ *
+ * This is called from dmar_pci_bus_add_dev() so it works even when DMA
+ * remapping is disabled. Only update the pointer if the device is not
+ * already handled by a non default PCI/MSI interrupt domain. This protects
+ * e.g. VMD devices.
+ */
+void intel_irq_remap_add_device(struct dmar_pci_notify_info *info)
+{
+ if (!irq_remapping_enabled || pci_dev_has_special_msi_domain(info->dev))
+ return;
+
+ dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev));
+}
+
static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
memset(irte, 0, sizeof(*irte));
@@ -1101,51 +1128,20 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
irte->redir_hint = 1;
}
-static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
-{
- struct intel_iommu *iommu = NULL;
-
- if (!info)
- return NULL;
-
- switch (info->type) {
- case X86_IRQ_ALLOC_TYPE_IOAPIC:
- iommu = map_ioapic_to_ir(info->ioapic_id);
- break;
- case X86_IRQ_ALLOC_TYPE_HPET:
- iommu = map_hpet_to_ir(info->hpet_id);
- break;
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- iommu = map_dev_to_ir(info->msi_dev);
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- return iommu ? iommu->ir_domain : NULL;
-}
-
static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
{
- struct intel_iommu *iommu;
-
if (!info)
return NULL;
switch (info->type) {
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- iommu = map_dev_to_ir(info->msi_dev);
- if (iommu)
- return iommu->ir_msi_domain;
- break;
+ case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+ return map_ioapic_to_ir(info->devid);
+ case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+ return map_hpet_to_ir(info->devid);
default:
- break;
+ WARN_ON_ONCE(1);
+ return NULL;
}
-
- return NULL;
}
struct irq_remap_ops intel_irq_remap_ops = {
@@ -1154,7 +1150,6 @@ struct irq_remap_ops intel_irq_remap_ops = {
.disable = disable_irq_remapping,
.reenable = reenable_irq_remapping,
.enable_faulting = enable_drhd_fault_handling,
- .get_ir_irq_domain = intel_get_ir_irq_domain,
.get_irq_domain = intel_get_irq_domain,
};
@@ -1278,16 +1273,16 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
/* Set source-id of interrupt request */
- set_ioapic_sid(irte, info->ioapic_id);
+ set_ioapic_sid(irte, info->devid);
apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
- info->ioapic_id, irte->present, irte->fpd,
+ info->devid, irte->present, irte->fpd,
irte->dst_mode, irte->redir_hint,
irte->trigger_mode, irte->dlvry_mode,
irte->avail, irte->vector, irte->dest_id,
irte->sid, irte->sq, irte->svt);
- entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
- info->ioapic_entry = NULL;
+ entry = (struct IR_IO_APIC_route_entry *)info->ioapic.entry;
+ info->ioapic.entry = NULL;
memset(entry, 0, sizeof(*entry));
entry->index2 = (index >> 15) & 0x1;
entry->zero = 0;
@@ -1297,21 +1292,21 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
* IO-APIC RTE will be configured with virtual vector.
* irq handler will do the explicit EOI to the io-apic.
*/
- entry->vector = info->ioapic_pin;
+ entry->vector = info->ioapic.pin;
entry->mask = 0; /* enable IRQ */
- entry->trigger = info->ioapic_trigger;
- entry->polarity = info->ioapic_polarity;
- if (info->ioapic_trigger)
+ entry->trigger = info->ioapic.trigger;
+ entry->polarity = info->ioapic.polarity;
+ if (info->ioapic.trigger)
entry->mask = 1; /* Mask level triggered irqs. */
break;
case X86_IRQ_ALLOC_TYPE_HPET:
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
- set_hpet_sid(irte, info->hpet_id);
+ set_hpet_sid(irte, info->devid);
else
- set_msi_sid(irte, info->msi_dev);
+ set_msi_sid(irte, msi_desc_to_pci_dev(info->desc));
msg->address_hi = MSI_ADDR_BASE_HI;
msg->data = sub_handle;
@@ -1362,15 +1357,15 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
if (!info || !iommu)
return -EINVAL;
- if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
- info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+ if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
+ info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
return -EINVAL;
/*
* With IRQ remapping enabled, don't need contiguous CPU vectors
* to support multiple MSI interrupts.
*/
- if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index e6faedf42fd4..b92af83b79bd 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -27,7 +27,7 @@
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
-int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
+int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
unsigned long flags;
u8 status_code;
@@ -58,7 +58,7 @@ int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
return ret;
}
-void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid)
+void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
unsigned long flags;
u8 status_code;
@@ -146,7 +146,7 @@ int intel_pasid_alloc_table(struct device *dev)
struct pasid_table *pasid_table;
struct pasid_table_opaque data;
struct page *pages;
- int max_pasid = 0;
+ u32 max_pasid = 0;
int ret, order;
int size;
@@ -168,7 +168,7 @@ int intel_pasid_alloc_table(struct device *dev)
INIT_LIST_HEAD(&pasid_table->dev);
if (info->pasid_supported)
- max_pasid = min_t(int, pci_max_pasids(to_pci_dev(dev)),
+ max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
intel_pasid_max_id);
size = max_pasid >> (PASID_PDE_SHIFT - 3);
@@ -242,7 +242,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
return info->pasid_table->max_pasid;
}
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
+struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
@@ -251,8 +251,7 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
int dir_index, index;
pasid_table = intel_pasid_get_table(dev);
- if (WARN_ON(!pasid_table || pasid < 0 ||
- pasid >= intel_pasid_get_dev_max_id(dev)))
+ if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
return NULL;
dir = pasid_table->table;
@@ -305,7 +304,7 @@ static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
}
static void
-intel_pasid_clear_entry(struct device *dev, int pasid, bool fault_ignore)
+intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
struct pasid_entry *pe;
@@ -444,7 +443,7 @@ pasid_set_eafe(struct pasid_entry *pe)
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
- u16 did, int pasid)
+ u16 did, u32 pasid)
{
struct qi_desc desc;
@@ -473,7 +472,7 @@ iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
- struct device *dev, int pasid)
+ struct device *dev, u32 pasid)
{
struct device_domain_info *info;
u16 sid, qdep, pfsid;
@@ -499,7 +498,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
}
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
- int pasid, bool fault_ignore)
+ u32 pasid, bool fault_ignore)
{
struct pasid_entry *pte;
u16 did;
@@ -524,7 +523,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
static void pasid_flush_caches(struct intel_iommu *iommu,
struct pasid_entry *pte,
- int pasid, u16 did)
+ u32 pasid, u16 did)
{
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -543,7 +542,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
*/
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
- int pasid, u16 did, int flags)
+ u32 pasid, u16 did, int flags)
{
struct pasid_entry *pte;
@@ -616,7 +615,7 @@ static inline int iommu_skip_agaw(struct dmar_domain *domain,
*/
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid)
+ struct device *dev, u32 pasid)
{
struct pasid_entry *pte;
struct dma_pte *pgd;
@@ -674,7 +673,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
*/
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid)
+ struct device *dev, u32 pasid)
{
u16 did = FLPT_DEFAULT_DID;
struct pasid_entry *pte;
@@ -760,7 +759,7 @@ intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
* @addr_width: Address width of the first level (guest)
*/
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
- pgd_t *gpgd, int pasid,
+ pgd_t *gpgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width)
{
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index c9850766c3a9..97dfcffbf495 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -72,7 +72,7 @@ struct pasid_entry {
struct pasid_table {
void *table; /* pasid table pointer */
int order; /* page order of pasid table */
- int max_pasid; /* max pasid */
+ u32 max_pasid; /* max pasid */
struct list_head dev; /* device list */
};
@@ -98,31 +98,31 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
-extern u32 intel_pasid_max_id;
+extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
-void intel_pasid_free_id(int pasid);
-void *intel_pasid_lookup_id(int pasid);
+void intel_pasid_free_id(u32 pasid);
+void *intel_pasid_lookup_id(u32 pasid);
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
int intel_pasid_get_dev_max_id(struct device *dev);
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid);
+struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
- int pasid, u16 did, int flags);
+ u32 pasid, u16 did, int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid);
+ struct device *dev, u32 pasid);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid);
+ struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu,
- struct device *dev, pgd_t *pgd, int pasid,
+ struct device *dev, pgd_t *pgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
- struct device *dev, int pasid,
+ struct device *dev, u32 pasid,
bool fault_ignore);
-int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid);
-void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid);
+int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
+void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 95c3164a2302..60ffe083b6d6 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -19,11 +19,12 @@
#include <linux/mm_types.h>
#include <linux/ioasid.h>
#include <asm/page.h>
+#include <asm/fpu/api.h>
#include "pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
-static void intel_svm_drain_prq(struct device *dev, int pasid);
+static void intel_svm_drain_prq(struct device *dev, u32 pasid);
#define PRQ_ORDER 0
@@ -399,7 +400,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
return ret;
}
-int intel_svm_unbind_gpasid(struct device *dev, int pasid)
+int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev;
@@ -444,9 +445,28 @@ out:
return ret;
}
+static void _load_pasid(void *unused)
+{
+ update_pasid();
+}
+
+static void load_pasid(struct mm_struct *mm, u32 pasid)
+{
+ mutex_lock(&mm->context.lock);
+
+ /* Synchronize with READ_ONCE in update_pasid(). */
+ smp_store_release(&mm->pasid, pasid);
+
+ /* Update PASID MSR on all CPUs running the mm's tasks. */
+ on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
+
+ mutex_unlock(&mm->context.lock);
+}
+
/* Caller must hold pasid_mutex, mm reference */
static int
-intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
+intel_svm_bind_mm(struct device *dev, unsigned int flags,
+ struct svm_dev_ops *ops,
struct mm_struct *mm, struct intel_svm_dev **sd)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
@@ -590,6 +610,10 @@ intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
}
list_add_tail(&svm->list, &global_svm_list);
+ if (mm) {
+ /* The newly allocated pasid is loaded to the mm. */
+ load_pasid(mm, svm->pasid);
+ }
} else {
/*
* Binding a new device with existing PASID, need to setup
@@ -620,7 +644,7 @@ out:
}
/* Caller must hold pasid_mutex */
-static int intel_svm_unbind_mm(struct device *dev, int pasid)
+static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
@@ -653,8 +677,11 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
if (list_empty(&svm->devs)) {
ioasid_free(svm->pasid);
- if (svm->mm)
+ if (svm->mm) {
mmu_notifier_unregister(&svm->notifier, svm->mm);
+ /* Clear mm's pasid. */
+ load_pasid(svm->mm, PASID_DISABLED);
+ }
list_del(&svm->list);
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
@@ -739,7 +766,7 @@ static bool is_canonical_address(u64 addr)
* described in VT-d spec CH7.10 to drain all page requests and page
* responses pending in the hardware.
*/
-static void intel_svm_drain_prq(struct device *dev, int pasid)
+static void intel_svm_drain_prq(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct dmar_domain *domain;
@@ -1033,7 +1060,7 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_sva *sva = ERR_PTR(-EINVAL);
struct intel_svm_dev *sdev = NULL;
- int flags = 0;
+ unsigned int flags = 0;
int ret;
/*
@@ -1042,7 +1069,7 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
* and intel_svm etc.
*/
if (drvdata)
- flags = *(int *)drvdata;
+ flags = *(unsigned int *)drvdata;
mutex_lock(&pasid_mutex);
ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
if (ret)
@@ -1067,10 +1094,10 @@ void intel_svm_unbind(struct iommu_sva *sva)
mutex_unlock(&pasid_mutex);
}
-int intel_svm_get_pasid(struct iommu_sva *sva)
+u32 intel_svm_get_pasid(struct iommu_sva *sva)
{
struct intel_svm_dev *sdev;
- int pasid;
+ u32 pasid;
mutex_lock(&pasid_mutex);
sdev = to_intel_svm_dev(sva);
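load_pasid() above is an instance of a common pattern: publish a per-mm value with release semantics, then run a function on every CPU currently executing that mm so per-CPU state (here the PASID MSR via update_pasid()) picks it up. A generic sketch of the pattern, with update_my_state() as a hypothetical stand-in for the MSR write:

#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void update_my_state(void *unused)
{
	/* Runs on each targeted CPU with interrupts disabled. */
}

static void publish_to_mm(struct mm_struct *mm, u32 value)
{
	/* Pairs with READ_ONCE()/acquire on the consumer side. */
	smp_store_release(&mm->pasid, value);

	/* Kick every CPU that is currently running a task of this mm. */
	on_each_cpu_mask(mm_cpumask(mm), update_my_state, NULL, true);
}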
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 609bd25bf154..0e4fbdc0f5e5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2839,7 +2839,7 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
-int iommu_sva_get_pasid(struct iommu_sva *handle)
+u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 83f36f61416e..2d84b1ed205e 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -160,33 +160,12 @@ void panic_if_irq_remap(const char *msg)
}
/**
- * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
- * device serving request @info
- * @info: interrupt allocation information, used to identify the IOMMU device
- *
- * It's used to get parent irqdomain for HPET and IOAPIC irqdomains.
- * Returns pointer to IRQ domain, or NULL on failure.
- */
-struct irq_domain *
-irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info)
-{
- if (!remap_ops || !remap_ops->get_ir_irq_domain)
- return NULL;
-
- return remap_ops->get_ir_irq_domain(info);
-}
-
-/**
* irq_remapping_get_irq_domain - Get the irqdomain serving the request @info
* @info: interrupt allocation information, used to identify the IOMMU device
*
- * There will be one PCI MSI/MSIX irqdomain associated with each interrupt
- * remapping device, so this interface is used to retrieve the PCI MSI/MSIX
- * irqdomain serving request @info.
* Returns pointer to IRQ domain, or NULL on failure.
*/
-struct irq_domain *
-irq_remapping_get_irq_domain(struct irq_alloc_info *info)
+struct irq_domain *irq_remapping_get_irq_domain(struct irq_alloc_info *info)
{
if (!remap_ops || !remap_ops->get_irq_domain)
return NULL;
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 6a190d504eb6..1661b3d75920 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -43,10 +43,7 @@ struct irq_remap_ops {
/* Enable fault handling */
int (*enable_faulting)(void);
- /* Get the irqdomain associated the IOMMU device */
- struct irq_domain *(*get_ir_irq_domain)(struct irq_alloc_info *);
-
- /* Get the MSI irqdomain associated with the IOMMU device */
+ /* Get the irqdomain associated with the IOMMU device */
struct irq_domain *(*get_irq_domain)(struct irq_alloc_info *);
};
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index b4da396cce60..2bfdd5734844 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -440,7 +440,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
default:
dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
mem->subtype);
- /* Fall-through */
+ fallthrough;
case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
region = iommu_alloc_resv_region(start, size, 0,
IOMMU_RESV_RESERVED);