author     Kevin Mitchell <kevmitch@arista.com>    2019-06-12 23:52:05 +0200
committer  Joerg Roedel <jroedel@suse.de>          2019-07-01 14:03:07 +0200
commit     5c90501a7290e8066a5530f491e764730791945a (patch)
tree       13d2f56acf1122762ca7b7fcde9c382d72a9ad67 /drivers/iommu/amd_iommu_init.c
parent     iommu/amd: Move gart fallback to amd_iommu_init (diff)
iommu/amd: Only free resources once on init error
When amd_iommu=off was specified on the command line, the free_X_resources
functions were called immediately after early_amd_iommu_init(). They were
then called again when amd_iommu_init() also failed (as expected).
Instead, call them only once: at the end of state_next() whenever
there's an error. These functions should be safe to call any time and
any number of times. However, since state_next is never called again in
an error state, the cleanup will only ever be run once.
This also ensures that cleanup code is run as soon as possible after an
error is detected rather than waiting for amd_iommu_init() to be called.
Signed-off-by: Kevin Mitchell <kevmitch@arista.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
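
For illustration, here is a minimal, self-contained userspace sketch of the pattern the patch adopts. It is not the kernel code: the state names, go_to_state(), state_next() and free_resources() below are simplified stand-ins for the real iommu_go_to_state(), state_next() and free_*_resources() helpers. The point it demonstrates is the one the message makes: cleanup sits at a single exit point of the step function, and the driver loop never steps again out of the error state, so cleanup runs exactly once even when initialization is attempted twice.

```c
/*
 * Simplified sketch of the "clean up once, at the end of the step
 * function" pattern. Names and states are illustrative stand-ins,
 * not the AMD IOMMU driver's actual code.
 */
#include <stdio.h>

enum init_state {
	STATE_START,
	STATE_ACPI_FINISHED,
	STATE_ENABLED,
	STATE_INITIALIZED,
	STATE_ERROR,		/* terminal: never advanced out of */
};

static enum init_state init_state = STATE_START;
static int driver_disabled = 1;	/* pretend "driver=off" was on the cmdline */

/* Cleanup is safe to call at any time and any number of times. */
static void free_resources(void)
{
	printf("freeing resources\n");
}

/* Advance the state machine by one step; clean up once on any error. */
static int state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case STATE_START:
		init_state = STATE_ACPI_FINISHED;
		if (driver_disabled) {
			/* Just record the error; cleanup happens below. */
			init_state = STATE_ERROR;
			ret = -1;
		}
		break;
	case STATE_ACPI_FINISHED:
		init_state = STATE_ENABLED;
		break;
	case STATE_ENABLED:
		init_state = STATE_INITIALIZED;
		break;
	default:
		ret = -1;
		break;
	}

	/* Single cleanup point: runs as soon as any step fails. */
	if (ret)
		free_resources();

	return ret;
}

/* Drive the machine towards a target state; stop on the first error. */
static int go_to_state(enum init_state target)
{
	int ret = 0;

	while (init_state != target) {
		if (init_state == STATE_ERROR)
			return -1;	/* never step again from an error state */
		ret = state_next();
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	/* Early init fails and cleans up once inside state_next()... */
	if (go_to_state(STATE_ACPI_FINISHED))
		printf("early init failed\n");

	/* ...and the later attempt does not re-run the cleanup. */
	if (go_to_state(STATE_INITIALIZED))
		printf("late init failed\n");

	return 0;
}
```

Compiled and run, the "freeing resources" line appears only once: the first, failing call to go_to_state() cleans up inside state_next(), and the second call bails out of the terminal error state without stepping again.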
Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
 drivers/iommu/amd_iommu_init.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b90e26effe0a..e308fb6a2908 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2631,8 +2631,6 @@ static int __init state_next(void)
 	init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
 	if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
 		pr_info("AMD IOMMU disabled on kernel command-line\n");
-		free_dma_resources();
-		free_iommu_resources();
 		init_state = IOMMU_CMDLINE_DISABLED;
 		ret = -EINVAL;
 	}
@@ -2673,6 +2671,19 @@ static int __init state_next(void)
 		BUG();
 	}
 
+	if (ret) {
+		free_dma_resources();
+		if (!irq_remapping_enabled) {
+			disable_iommus();
+			free_iommu_resources();
+		} else {
+			struct amd_iommu *iommu;
+
+			uninit_device_table_dma();
+			for_each_iommu(iommu)
+				iommu_flush_all_caches(iommu);
+		}
+	}
 	return ret;
 }
 
@@ -2746,18 +2757,6 @@ static int __init amd_iommu_init(void)
 	int ret;
 
 	ret = iommu_go_to_state(IOMMU_INITIALIZED);
-	if (ret) {
-		free_dma_resources();
-		if (!irq_remapping_enabled) {
-			disable_iommus();
-			free_iommu_resources();
-		} else {
-			uninit_device_table_dma();
-			for_each_iommu(iommu)
-				iommu_flush_all_caches(iommu);
-		}
-	}
-
 #ifdef CONFIG_GART_IOMMU
 	if (ret && list_empty(&amd_iommu_list)) {
 		/*