author		Thomas Gleixner <tglx@linutronix.de>	2020-02-12 00:34:01 +0100
committer	Borislav Petkov <bp@suse.de>	2020-04-14 15:47:11 +0200
commit		c9bf318f77b3a78483e656e609d005c52aadc86d (patch)
tree		475b6e6950ba1387736445e93c261621e2ab6513
parent		x86/mce/amd: Do proper cleanup on error paths (diff)
download	linux-c9bf318f77b3a78483e656e609d005c52aadc86d.tar.xz linux-c9bf318f77b3a78483e656e609d005c52aadc86d.zip
x86/mce/amd: Init thresholding machinery only on relevant vendors
... and not unconditionally.
[ bp: Add a new vendor_flags bit for that. ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200403161943.1458-3-bp@alien8.de
-rw-r--r--	arch/x86/kernel/cpu/mce/amd.c		12
-rw-r--r--	arch/x86/kernel/cpu/mce/core.c		1
-rw-r--r--	arch/x86/kernel/cpu/mce/internal.h	9
3 files changed, 17 insertions, 5 deletions
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 477cf773cf1c..c3b3326ad4ac 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -1442,15 +1442,20 @@ free_out:
 
 int mce_threshold_remove_device(unsigned int cpu)
 {
+	struct threshold_bank **bp = this_cpu_read(threshold_banks);
 	unsigned int bank;
 
+	if (!bp)
+		return 0;
+
 	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
 		threshold_remove_bank(cpu, bank);
 	}
-	kfree(per_cpu(threshold_banks, cpu));
-	per_cpu(threshold_banks, cpu) = NULL;
+	/* Clear the pointer before freeing the memory */
+	this_cpu_write(threshold_banks, NULL);
+	kfree(bp);
 	return 0;
 }
 
@@ -1461,6 +1466,9 @@ int mce_threshold_create_device(unsigned int cpu)
 	struct threshold_bank **bp;
 	int err = 0;
 
+	if (!mce_flags.amd_threshold)
+		return 0;
+
 	bp = per_cpu(threshold_banks, cpu);
 	if (bp)
 		return 0;
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 54165f3569e8..43ca91e14a77 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1756,6 +1756,7 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
+		mce_flags.amd_threshold	 = 1;
 
 		if (mce_flags.smca) {
 			msr_ops.ctl	= smca_ctl_reg;
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 3b008172ad73..74a01829c4f4 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -148,7 +148,7 @@ struct mce_vendor_flags {
 	 * Recovery. It indicates support for data poisoning in HW and deferred
 	 * error interrupts.
 	 */
-	succor		: 1,
+	succor			: 1,
 
 	/*
 	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
@@ -156,9 +156,12 @@ struct mce_vendor_flags {
 	 * banks. Also, to accommodate the new banks and registers, the MCA
 	 * register space is moved to a new MSR range.
 	 */
-	smca		: 1,
+	smca			: 1,
 
-	__reserved_0	: 61;
+	/* AMD-style error thresholding banks present. */
+	amd_threshold		: 1,
+
+	__reserved_0		: 60;
 };
 
 extern struct mce_vendor_flags mce_flags;
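
The gist of the change: mce_threshold_create_device() now returns early unless the new mce_flags.amd_threshold vendor bit is set (the bit is only set on the AMD-style init path), and mce_threshold_remove_device() now tolerates the banks never having been allocated. As a rough illustration of that gating pattern only, here is a minimal userspace sketch; the enum cpu_vendor, the simplified functions and the calloc()-backed threshold_banks pointer are stand-ins invented for the example, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct mce_vendor_flags bitfield. */
struct vendor_flags {
	unsigned int overflow_recov	: 1,
		     succor		: 1,
		     smca		: 1,
		     /* AMD-style error thresholding banks present. */
		     amd_threshold	: 1;
};

static struct vendor_flags mce_flags;

/* Stand-in for the per-CPU threshold_banks pointer. */
static void *threshold_banks;

enum cpu_vendor { VENDOR_AMD, VENDOR_OTHER };

/* Like __mcheck_cpu_init_early(): the bit is set only for the relevant vendor. */
static void init_early(enum cpu_vendor v)
{
	if (v == VENDOR_AMD)
		mce_flags.amd_threshold = 1;
}

/* Like mce_threshold_create_device(): a no-op unless the bit is set. */
static int threshold_create_device(void)
{
	if (!mce_flags.amd_threshold)
		return 0;

	threshold_banks = calloc(1, 64);	/* placeholder for the real bank array */
	return threshold_banks ? 0 : -1;
}

/*
 * Like the reworked mce_threshold_remove_device(): tolerate the banks never
 * having been allocated, clear the pointer first, then free the saved copy.
 */
static int threshold_remove_device(void)
{
	void *bp = threshold_banks;

	if (!bp)
		return 0;

	threshold_banks = NULL;
	free(bp);
	return 0;
}

int main(void)
{
	init_early(VENDOR_OTHER);
	threshold_create_device();
	printf("non-AMD vendor: banks %s\n", threshold_banks ? "allocated" : "skipped");

	init_early(VENDOR_AMD);
	threshold_create_device();
	printf("AMD-style vendor: banks %s\n", threshold_banks ? "allocated" : "skipped");

	return threshold_remove_device();
}

On the ordering in the removal path: the patch clears the per-CPU pointer before calling kfree(), the idea being that a concurrent reader then sees NULL rather than a pointer to memory that is about to be freed.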