author	Thomas Gleixner <tglx@linutronix.de>	2023-10-02 14:00:03 +0200
committer	Borislav Petkov (AMD) <bp@alien8.de>	2023-10-24 15:05:55 +0200
commit	0bf871651211b58c7b19f40b746b646d5311e2ec (patch)
tree	427408f0042b93b53c013ba1722ea56e2a26c26f /arch
parent	x86/microcode: Provide new control functions (diff)
x86/microcode: Replace the all-in-one rendezvous handler

with a new handler which just separates the control flow of primary and
secondary CPUs.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20231002115903.433704135@linutronix.de
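
For reference, the load_cpus_stopped() rendezvous callback that results from this change boils down to a plain dispatch between the two control flows. A minimal sketch assembled from the added lines of the hunk below (kernel context assumed; load_primary() and load_secondary() are introduced by the parent commit, so only their call sites are visible here):

	static int load_cpus_stopped(void *unused)
	{
		unsigned int cpu = smp_processor_id();

		/* The designated control CPU of a core drives the update ... */
		if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu)
			load_primary(cpu);
		else
			load_secondary(cpu);

		/* No point to wait here. The CPUs will all wait in stop_machine(). */
		return 0;
	}
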
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/microcode/core.c	51
1 file changed, 9 insertions(+), 42 deletions(-)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 1ff38f9fdb8a..1c2710b7db6d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -268,7 +268,7 @@ struct microcode_ctrl {
};
static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
-static atomic_t late_cpus_in, late_cpus_out;
+static atomic_t late_cpus_in;
static bool wait_for_cpus(atomic_t *cnt)
{
@@ -304,7 +304,7 @@ static bool wait_for_ctrl(void)
return false;
}
-static __maybe_unused void load_secondary(unsigned int cpu)
+static void load_secondary(unsigned int cpu)
{
unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu);
enum ucode_state ret;
@@ -339,7 +339,7 @@ static __maybe_unused void load_secondary(unsigned int cpu)
this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
}
-static __maybe_unused void load_primary(unsigned int cpu)
+static void load_primary(unsigned int cpu)
{
struct cpumask *secondaries = topology_sibling_cpumask(cpu);
enum sibling_ctrl ctrl;
@@ -376,46 +376,14 @@ static __maybe_unused void load_primary(unsigned int cpu)
static int load_cpus_stopped(void *unused)
{
- int cpu = smp_processor_id();
- enum ucode_state ret;
-
- /*
- * Wait for all CPUs to arrive. A load will not be attempted unless all
- * CPUs show up.
- * */
- if (!wait_for_cpus(&late_cpus_in)) {
- this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
- return 0;
- }
-
- /*
- * On an SMT system, it suffices to load the microcode on one sibling of
- * the core because the microcode engine is shared between the threads.
- * Synchronization still needs to take place so that no concurrent
- * loading attempts happen on multiple threads of an SMT core. See
- * below.
- */
- if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
- goto wait_for_siblings;
+ unsigned int cpu = smp_processor_id();
- ret = microcode_ops->apply_microcode(cpu);
- this_cpu_write(ucode_ctrl.result, ret);
-
-wait_for_siblings:
- if (!wait_for_cpus(&late_cpus_out))
- panic("Timeout during microcode update!\n");
-
- /*
- * At least one thread has completed update on each core.
- * For others, simply call the update to make sure the
- * per-cpu cpuinfo can be updated with right microcode
- * revision.
- */
- if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
- return 0;
+ if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu)
+ load_primary(cpu);
+ else
+ load_secondary(cpu);
- ret = microcode_ops->apply_microcode(cpu);
- this_cpu_write(ucode_ctrl.result, ret);
+ /* No point to wait here. The CPUs will all wait in stop_machine(). */
return 0;
}
@@ -429,7 +397,6 @@ static int load_late_stop_cpus(void)
pr_err("You should switch to early loading, if possible.\n");
atomic_set(&late_cpus_in, num_online_cpus());
- atomic_set(&late_cpus_out, num_online_cpus());
/*
* Take a snapshot before the microcode update in order to compare and