Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
-rw-r--r-- | drivers/iommu/amd_iommu_init.c | 278
1 file changed, 258 insertions, 20 deletions
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 18a89b760aaa..81837b0710a9 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,16 +26,18 @@
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
-#include <linux/acpi.h>
 #include <acpi/acpi.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/x86_init.h>
 #include <asm/iommu_table.h>
+#include <asm/io_apic.h>
+#include <asm/irq_remapping.h>

 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
+#include "irq_remapping.h"

 /*
  * definitions for the ACPI scanning code
@@ -55,6 +57,10 @@
 #define IVHD_DEV_ALIAS_RANGE            0x43
 #define IVHD_DEV_EXT_SELECT             0x46
 #define IVHD_DEV_EXT_SELECT_RANGE       0x47
+#define IVHD_DEV_SPECIAL                0x48
+
+#define IVHD_SPECIAL_IOAPIC             1
+#define IVHD_SPECIAL_HPET               2

 #define IVHD_FLAG_HT_TUN_EN_MASK        0x01
 #define IVHD_FLAG_PASSPW_EN_MASK        0x02
@@ -123,6 +129,7 @@ struct ivmd_header {
 } __attribute__((packed));

 bool amd_iommu_dump;
+bool amd_iommu_irq_remap __read_mostly;

 static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;
@@ -178,7 +185,13 @@ u16 *amd_iommu_alias_table;
 struct amd_iommu **amd_iommu_rlookup_table;

 /*
- * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
+ * This table is used to find the irq remapping table for a given device id
+ * quickly.
+ */
+struct irq_remap_table **irq_lookup_table;
+
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
  * to know which ones are already in use.
  */
 unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -478,7 +491,7 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)

 /****************************************************************************
  *
- * The following functions belong the the code path which parses the ACPI table
+ * The following functions belong to the code path which parses the ACPI table
  * the second time. In this ACPI parsing iteration we allocate IOMMU specific
  * data structures, initialize the device/alias/rlookup table and also
  * basically initialize the hardware.
@@ -690,8 +703,33 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
         set_iommu_for_device(iommu, devid);
 }

+static int add_special_device(u8 type, u8 id, u16 devid)
+{
+        struct devid_map *entry;
+        struct list_head *list;
+
+        if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+                return -EINVAL;
+
+        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+        if (!entry)
+                return -ENOMEM;
+
+        entry->id    = id;
+        entry->devid = devid;
+
+        if (type == IVHD_SPECIAL_IOAPIC)
+                list = &ioapic_map;
+        else
+                list = &hpet_map;
+
+        list_add_tail(&entry->list, list);
+
+        return 0;
+}
+
 /*
- * Reads the device exclusion range from ACPI and initialize IOMMU with
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
  * it
  */
 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
@@ -717,7 +755,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
  * Takes a pointer to an AMD IOMMU entry in the ACPI table and
  * initializes the hardware and our data structures with it.
  */
-static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                         struct ivhd_header *h)
 {
         u8 *p = (u8 *)h;
@@ -867,12 +905,43 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                                 flags, ext_flags);
                         }
                         break;
+                case IVHD_DEV_SPECIAL: {
+                        u8 handle, type;
+                        const char *var;
+                        u16 devid;
+                        int ret;
+
+                        handle = e->ext & 0xff;
+                        devid  = (e->ext >> 8) & 0xffff;
+                        type   = (e->ext >> 24) & 0xff;
+
+                        if (type == IVHD_SPECIAL_IOAPIC)
+                                var = "IOAPIC";
+                        else if (type == IVHD_SPECIAL_HPET)
+                                var = "HPET";
+                        else
+                                var = "UNKNOWN";
+
+                        DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
+                                    var, (int)handle,
+                                    PCI_BUS(devid),
+                                    PCI_SLOT(devid),
+                                    PCI_FUNC(devid));
+
+                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+                        ret = add_special_device(type, handle, devid);
+                        if (ret)
+                                return ret;
+                        break;
+                }
                 default:
                         break;
                 }

                 p += ivhd_entry_length(p);
         }
+
+        return 0;
 }

 /* Initializes the device->iommu mapping for the driver */
@@ -912,6 +981,8 @@ static void __init free_iommu_all(void)
  */
 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 {
+        int ret;
+
         spin_lock_init(&iommu->lock);

         /* Add IOMMU to internal data structures */
@@ -947,7 +1018,16 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)

         iommu->int_enabled = false;

-        init_iommu_from_acpi(iommu, h);
+        ret = init_iommu_from_acpi(iommu, h);
+        if (ret)
+                return ret;
+
+        /*
+         * Make sure IOMMU is not considered to translate itself. The IVRS
+         * table tells us so, but this is a lie!
+         */
+        amd_iommu_rlookup_table[iommu->devid] = NULL;
+
         init_iommu_devices(iommu);

         return 0;
@@ -1115,9 +1195,11 @@ static void print_iommu_info(void)
                                 if (iommu_feature(iommu, (1ULL << i)))
                                         pr_cont(" %s", feat_str[i]);
                         }
-                }
                 pr_cont("\n");
+                }
         }
+        if (irq_remapping_enabled)
+                pr_info("AMD-Vi: Interrupt remapping enabled\n");
 }

 static int __init amd_iommu_init_pci(void)
@@ -1141,7 +1223,7 @@ static int __init amd_iommu_init_pci(void)
 /****************************************************************************
  *
  * The following functions initialize the MSI interrupts for all IOMMUs
- * in the system. Its a bit challenging because there could be multiple
+ * in the system. It's a bit challenging because there could be multiple
  * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
  * pci_dev.
  *
@@ -1199,7 +1281,7 @@ enable_faults:
  *
  * The next functions belong to the third pass of parsing the ACPI
  * table. In this last pass the memory mapping requirements are
- * gathered (like exclusion and unity mapping reanges).
+ * gathered (like exclusion and unity mapping ranges).
  *
 ****************************************************************************/

@@ -1308,7 +1390,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  * Init the device table to not allow DMA access for devices and
  * suppress all page faults
  */
-static void init_device_table(void)
+static void init_device_table_dma(void)
 {
         u32 devid;

@@ -1318,6 +1400,27 @@ static void init_device_table(void)
         }
 }

+static void __init uninit_device_table_dma(void)
+{
+        u32 devid;
+
+        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+                amd_iommu_dev_table[devid].data[0] = 0ULL;
+                amd_iommu_dev_table[devid].data[1] = 0ULL;
+        }
+}
+
+static void init_device_table(void)
+{
+        u32 devid;
+
+        if (!amd_iommu_irq_remap)
+                return;
+
+        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+                set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+}
+
 static void iommu_init_flags(struct amd_iommu *iommu)
 {
         iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
@@ -1466,10 +1569,14 @@ static struct syscore_ops amd_iommu_syscore_ops = {

 static void __init free_on_init_error(void)
 {
-        amd_iommu_uninit_devices();
+        free_pages((unsigned long)irq_lookup_table,
+                   get_order(rlookup_table_size));

-        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-                   get_order(MAX_DOMAIN_ID/8));
+        if (amd_iommu_irq_cache) {
+                kmem_cache_destroy(amd_iommu_irq_cache);
+                amd_iommu_irq_cache = NULL;
+
+        }

         free_pages((unsigned long)amd_iommu_rlookup_table,
                    get_order(rlookup_table_size));
@@ -1482,8 +1589,6 @@ static void __init free_on_init_error(void)

         free_iommu_all();

-        free_unity_maps();
-
 #ifdef CONFIG_GART_IOMMU
         /*
          * We failed to initialize the AMD IOMMU - try fallback to GART
@@ -1494,6 +1599,58 @@
 #endif
 }

+/* SB IOAPIC is always on this device in AMD systems */
+#define IOAPIC_SB_DEVID         ((0x00 << 8) | PCI_DEVFN(0x14, 0))
+
+static bool __init check_ioapic_information(void)
+{
+        bool ret, has_sb_ioapic;
+        int idx;
+
+        has_sb_ioapic = false;
+        ret           = false;
+
+        for (idx = 0; idx < nr_ioapics; idx++) {
+                int devid, id = mpc_ioapic_id(idx);
+
+                devid = get_ioapic_devid(id);
+                if (devid < 0) {
+                        pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+                        ret = false;
+                } else if (devid == IOAPIC_SB_DEVID) {
+                        has_sb_ioapic = true;
+                        ret           = true;
+                }
+        }
+
+        if (!has_sb_ioapic) {
+                /*
+                 * We expect the SB IOAPIC to be listed in the IVRS
+                 * table. The system timer is connected to the SB IOAPIC
+                 * and if we don't have it in the list the system will
+                 * panic at boot time. This situation usually happens
+                 * when the BIOS is buggy and provides us the wrong
+                 * device id for the IOAPIC in the system.
+                 */
+                pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+        }
+
+        if (!ret)
+                pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+
+        return ret;
+}
+
+static void __init free_dma_resources(void)
+{
+        amd_iommu_uninit_devices();
+
+        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+                   get_order(MAX_DOMAIN_ID/8));
+
+        free_unity_maps();
+}
+
 /*
  * This is the hardware init function for AMD IOMMU in the system.
  * This function is called either from amd_iommu_init or from the interrupt
@@ -1580,9 +1737,6 @@ static int __init early_amd_iommu_init(void)
         if (amd_iommu_pd_alloc_bitmap == NULL)
                 goto out;

-        /* init the device table */
-        init_device_table();
-
         /*
          * let all alias entries point to itself
          */
@@ -1605,10 +1759,35 @@ static int __init early_amd_iommu_init(void)
         if (ret)
                 goto out;

+        if (amd_iommu_irq_remap)
+                amd_iommu_irq_remap = check_ioapic_information();
+
+        if (amd_iommu_irq_remap) {
+                /*
+                 * Interrupt remapping enabled, create kmem_cache for the
+                 * remapping tables.
+                 */
+                amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
+                                MAX_IRQS_PER_TABLE * sizeof(u32),
+                                IRQ_TABLE_ALIGNMENT,
+                                0, NULL);
+                if (!amd_iommu_irq_cache)
+                        goto out;
+
+                irq_lookup_table = (void *)__get_free_pages(
+                                GFP_KERNEL | __GFP_ZERO,
+                                get_order(rlookup_table_size));
+                if (!irq_lookup_table)
+                        goto out;
+        }
+
         ret = init_memory_definitions(ivrs_base);
         if (ret)
                 goto out;

+        /* init the device table */
+        init_device_table();
+
 out:
         /* Don't leak any ACPI memory */
         early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
@@ -1652,13 +1831,22 @@ static bool detect_ivrs(void)
         /* Make sure ACS will be enabled during PCI probe */
         pci_request_acs();

+        if (!disable_irq_remap)
+                amd_iommu_irq_remap = true;
+
         return true;
 }

 static int amd_iommu_init_dma(void)
 {
+        struct amd_iommu *iommu;
         int ret;

+        init_device_table_dma();
+
+        for_each_iommu(iommu)
+                iommu_flush_all_caches(iommu);
+
         if (iommu_pass_through)
                 ret = amd_iommu_init_passthrough();
         else
@@ -1749,7 +1937,48 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
         return ret;
 }

+#ifdef CONFIG_IRQ_REMAP
+int __init amd_iommu_prepare(void)
+{
+        return iommu_go_to_state(IOMMU_ACPI_FINISHED);
+}
+int __init amd_iommu_supported(void)
+{
+        return amd_iommu_irq_remap ? 1 : 0;
+}
+
+int __init amd_iommu_enable(void)
+{
+        int ret;
+
+        ret = iommu_go_to_state(IOMMU_ENABLED);
+        if (ret)
+                return ret;
+
+        irq_remapping_enabled = 1;
+
+        return 0;
+}
+
+void amd_iommu_disable(void)
+{
+        amd_iommu_suspend();
+}
+
+int amd_iommu_reenable(int mode)
+{
+        amd_iommu_resume();
+
+        return 0;
+}
+
+int __init amd_iommu_enable_faulting(void)
+{
+        /* We enable MSI later when PCI is initialized */
+        return 0;
+}
+#endif

 /*
  * This is the core init function for AMD IOMMU hardware in the system.
@@ -1762,8 +1991,17 @@ static int __init amd_iommu_init(void)

         ret = iommu_go_to_state(IOMMU_INITIALIZED);
         if (ret) {
-                disable_iommus();
-                free_on_init_error();
+                free_dma_resources();
+                if (!irq_remapping_enabled) {
+                        disable_iommus();
+                        free_on_init_error();
+                } else {
+                        struct amd_iommu *iommu;
+
+                        uninit_device_table_dma();
+                        for_each_iommu(iommu)
+                                iommu_flush_all_caches(iommu);
+                }
         }

         return ret;
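
For readers skimming the IVHD_DEV_SPECIAL hunk above: the patch packs an IOAPIC/HPET handle, a PCI device id, and a variety type into the entry's 32-bit ext field. The standalone C sketch below is not part of the patch; the sample value and test harness are invented for illustration, but the bit layout mirrors the new case in init_iommu_from_acpi().

/* Illustration only: decode an IVHD "special device" ext field the way the
 * IVHD_DEV_SPECIAL case above does. The sample value is hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define IVHD_SPECIAL_IOAPIC 1
#define IVHD_SPECIAL_HPET   2

/* Bus/slot/function split of a 16-bit IOMMU device id (bus << 8 | devfn) */
#define PCI_BUS(x)  (((x) >> 8) & 0xff)
#define PCI_SLOT(x) (((x) >> 3) & 0x1f)
#define PCI_FUNC(x) ((x) & 0x07)

int main(void)
{
        /* Made-up entry: variety IOAPIC, devid 00:14.0 (0x00a0), handle 9 */
        uint32_t ext    = (IVHD_SPECIAL_IOAPIC << 24) | (0x00a0 << 8) | 9;
        uint8_t  handle = ext & 0xff;          /* IOAPIC id or HPET number */
        uint16_t devid  = (ext >> 8) & 0xffff; /* requester id used for DTE */
        uint8_t  type   = (ext >> 24) & 0xff;  /* IVHD_SPECIAL_IOAPIC/HPET */

        printf("%s[%d] -> devid %02x:%02x.%x\n",
               type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" :
               type == IVHD_SPECIAL_HPET ? "HPET" : "UNKNOWN",
               (int)handle, PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid));

        return 0;
}

Running this prints "IOAPIC[9] -> devid 00:14.0", i.e. the same southbridge device id that IOAPIC_SB_DEVID encodes in check_ioapic_information().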