Diffstat (limited to 'arch/x86/kernel')
51 files changed, 769 insertions, 659 deletions
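A recurring theme in the APIC changes below is the reinstatement of IPI shorthands (the ALLBUT/ALLINC/SELF destination modes of the ICR). Shorthand broadcasts are only safe once every present CPU has been booted at least once, because the local APIC of a never-booted CPU may be in a random state and an NMI broadcast to it could cause havoc. The rework therefore gates shorthand use behind a static key that is re-evaluated from the CPU hotplug path. The following is a condensed sketch of that pattern, simplified from the arch/x86/kernel/apic/ipi.c hunks further down; it is for orientation only, not the literal patched code:

        /* Static key: off until shorthand broadcasts are known to be safe. */
        DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

        void apic_smt_update(void)
        {
                /*
                 * Keep shorthands off if disabled on the command line, if
                 * only one CPU is online, or if not all present CPUs have
                 * been booted at least once.
                 */
                if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
                    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask))
                        static_branch_disable(&apic_use_ipi_shorthand);
                else
                        static_branch_enable(&apic_use_ipi_shorthand);
        }

        void apic_send_IPI_allbutself(unsigned int vector)
        {
                if (num_online_cpus() < 2)
                        return;

                /* Broadcast via shorthand when safe, else use the online mask. */
                if (static_branch_likely(&apic_use_ipi_shorthand))
                        apic->send_IPI_allbutself(vector);
                else
                        apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
        }

The new no_ipi_broadcast= command line option feeds apic_ipi_shorthand_off, so shorthand broadcasting can still be disabled outright.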
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index b0715c3ac18d..7f9ade13bbcf 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -18,8 +18,13 @@ ENTRY(wakeup_long64) movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax - jne bogus_64_magic + je 2f + /* stop here on a saved_magic mismatch */ + movq $0xbad6d61676963, %rcx +1: + jmp 1b +2: movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds @@ -37,9 +42,6 @@ ENTRY(wakeup_long64) jmp *%rax ENDPROC(wakeup_long64) -bogus_64_magic: - jmp bogus_64_magic - ENTRY(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index ccd32013c47a..9d3a971ea364 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -713,7 +713,7 @@ void __init alternative_instructions(void) * Don't stop machine check exceptions while patching. * MCEs only happen when something got corrupted and in this * case we must do something about the corruption. - * Ignoring it is worse than a unlikely patching race. + * Ignoring it is worse than an unlikely patching race. * Also machine checks tend to be broadcast and if one CPU * goes into machine check the others follow quickly, so we don't * expect a machine check to cause undue problems during to code @@ -753,8 +753,8 @@ void __init alternative_instructions(void) * When you use this code to patch more than one byte of an instruction * you need to make sure that other CPUs cannot execute this code in parallel. * Also no thread must be currently preempted in the middle of these - * instructions. And on the local CPU you need to be protected again NMI or MCE - * handlers seeing an inconsistent instruction while you patch. + * instructions. And on the local CPU you need to be protected against NMI or + * MCE handlers seeing an inconsistent instruction while you patch. */ void __init_or_module text_poke_early(void *addr, const void *opcode, size_t len) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index d63e63b7d1d9..251c795b4eb3 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -21,6 +21,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 /* Protect the PCI config register pairs used for SMN and DF indirect access. 
*/ static DEFINE_MUTEX(smn_mutex); @@ -50,6 +51,7 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, {} }; EXPORT_SYMBOL_GPL(amd_nb_misc_ids); @@ -63,6 +65,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, {} }; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index aa5495d0f478..9e2dd2b296cd 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -65,10 +65,10 @@ unsigned int num_processors; unsigned disabled_cpus; /* Processor that is doing the boot up */ -unsigned int boot_cpu_physical_apicid = -1U; +unsigned int boot_cpu_physical_apicid __ro_after_init = -1U; EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid); -u8 boot_cpu_apic_version; +u8 boot_cpu_apic_version __ro_after_init; /* * The highest APIC ID seen during enumeration. @@ -85,13 +85,13 @@ physid_mask_t phys_cpu_present_map; * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to * avoid undefined behaviour caused by sending INIT from AP to BSP. */ -static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID; +static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID; /* * This variable controls which CPUs receive external NMIs. By default, * external NMIs are delivered only to the BSP. */ -static int apic_extnmi = APIC_EXTNMI_BSP; +static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP; /* * Map cpu index to physical APIC ID @@ -114,7 +114,7 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid); DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID); /* Local APIC was disabled by the BIOS and enabled by the kernel */ -static int enabled_via_apicbase; +static int enabled_via_apicbase __ro_after_init; /* * Handle interrupt mode configuration register (IMCR). 
@@ -172,23 +172,23 @@ static __init int setup_apicpmtimer(char *s) __setup("apicpmtimer", setup_apicpmtimer); #endif -unsigned long mp_lapic_addr; -int disable_apic; +unsigned long mp_lapic_addr __ro_after_init; +int disable_apic __ro_after_init; /* Disable local APIC timer from the kernel commandline or via dmi quirk */ static int disable_apic_timer __initdata; /* Local APIC timer works in C2 */ -int local_apic_timer_c2_ok; +int local_apic_timer_c2_ok __ro_after_init; EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); /* * Debug level, exported for io_apic.c */ -int apic_verbosity; +int apic_verbosity __ro_after_init; -int pic_mode; +int pic_mode __ro_after_init; /* Have we found an MP table */ -int smp_found_config; +int smp_found_config __ro_after_init; static struct resource lapic_resource = { .name = "Local APIC", @@ -199,7 +199,7 @@ unsigned int lapic_timer_period = 0; static void apic_pm_activate(void); -static unsigned long apic_phys; +static unsigned long apic_phys __ro_after_init; /* * Get the LAPIC version @@ -590,21 +590,21 @@ static u32 skx_deadline_rev(void) static const struct x86_cpu_id deadline_match[] = { DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), - DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), + DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_D, bdx_deadline_rev), DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E, 0x17), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL, 0x22), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_L, 0x20), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_G, 0x17), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_CORE, 0x25), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E, 0x17), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL, 0x25), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_G, 0x17), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_MOBILE, 0xb2), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_DESKTOP, 0xb2), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_L, 0xb2), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE, 0xb2), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_MOBILE, 0x52), - DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_DESKTOP, 0x52), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_L, 0x52), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE, 0x52), {}, }; @@ -834,6 +834,10 @@ bool __init apic_needs_pit(void) if (!boot_cpu_has(X86_FEATURE_APIC)) return true; + /* Virt guests may lack ARAT, but still have DEADLINE */ + if (!boot_cpu_has(X86_FEATURE_ARAT)) + return true; + /* Deadline timer is based on TSC so no further PIT action required */ if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) return false; @@ -1220,25 +1224,38 @@ void clear_local_APIC(void) } /** - * disable_local_APIC - clear and disable the local APIC + * apic_soft_disable - Clears and software disables the local APIC on hotplug + * + * Contrary to disable_local_APIC() this does not touch the enable bit in + * MSR_IA32_APICBASE. Clearing that bit on systems based on the 3 wire APIC + * bus would require a hardware reset as the APIC would lose track of bus + * arbitration. 
On systems with FSB delivery APICBASE could be disabled, + * but it has to be guaranteed that no interrupt is sent to the APIC while + * in that state and it's not clear from the SDM whether it still responds + * to INIT/SIPI messages. Stay on the safe side and use software disable. */ -void disable_local_APIC(void) +void apic_soft_disable(void) { - unsigned int value; - - /* APIC hasn't been mapped yet */ - if (!x2apic_mode && !apic_phys) - return; + u32 value; clear_local_APIC(); - /* - * Disable APIC (implies clearing of registers - * for 82489DX!). - */ + /* Soft disable APIC (implies clearing of registers for 82489DX!). */ value = apic_read(APIC_SPIV); value &= ~APIC_SPIV_APIC_ENABLED; apic_write(APIC_SPIV, value); +} + +/** + * disable_local_APIC - clear and disable the local APIC + */ +void disable_local_APIC(void) +{ + /* APIC hasn't been mapped yet */ + if (!x2apic_mode && !apic_phys) + return; + + apic_soft_disable(); #ifdef CONFIG_X86_32 /* @@ -1303,7 +1320,7 @@ void __init sync_Arb_IDs(void) APIC_INT_LEVELTRIG | APIC_DM_INIT); } -enum apic_intr_mode_id apic_intr_mode; +enum apic_intr_mode_id apic_intr_mode __ro_after_init; static int __init apic_intr_mode_select(void) { @@ -1491,54 +1508,72 @@ static void lapic_setup_esr(void) oldvalue, value); } -static void apic_pending_intr_clear(void) +#define APIC_IR_REGS APIC_ISR_NR +#define APIC_IR_BITS (APIC_IR_REGS * 32) +#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG) + +union apic_ir { + unsigned long map[APIC_IR_MAPSIZE]; + u32 regs[APIC_IR_REGS]; +}; + +static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr) { - long long max_loops = cpu_khz ? cpu_khz : 1000000; - unsigned long long tsc = 0, ntsc; - unsigned int queued; - unsigned long value; - int i, j, acked = 0; + int i, bit; + + /* Read the IRRs */ + for (i = 0; i < APIC_IR_REGS; i++) + irr->regs[i] = apic_read(APIC_IRR + i * 0x10); + + /* Read the ISRs */ + for (i = 0; i < APIC_IR_REGS; i++) + isr->regs[i] = apic_read(APIC_ISR + i * 0x10); - if (boot_cpu_has(X86_FEATURE_TSC)) - tsc = rdtsc(); /* - * After a crash, we no longer service the interrupts and a pending - * interrupt from previous kernel might still have ISR bit set. - * - * Most probably by now CPU has serviced that pending interrupt and - * it might not have done the ack_APIC_irq() because it thought, - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it - * does not clear the ISR bit and cpu thinks it has already serivced - * the interrupt. Hence a vector might get locked. It was noticed - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. + * If the ISR map is not empty. ACK the APIC and run another round + * to verify whether a pending IRR has been unblocked and turned + * into a ISR. */ - do { - queued = 0; - for (i = APIC_ISR_NR - 1; i >= 0; i--) - queued |= apic_read(APIC_IRR + i*0x10); - - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for_each_set_bit(j, &value, 32) { - ack_APIC_irq(); - acked++; - } - } - if (acked > 256) { - pr_err("LAPIC pending interrupts after %d EOI\n", acked); - break; - } - if (queued) { - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { - ntsc = rdtsc(); - max_loops = (long long)cpu_khz << 10; - max_loops -= ntsc - tsc; - } else { - max_loops--; - } - } - } while (queued && max_loops > 0); - WARN_ON(max_loops <= 0); + if (!bitmap_empty(isr->map, APIC_IR_BITS)) { + /* + * There can be multiple ISR bits set when a high priority + * interrupt preempted a lower priority one. 
Issue an ACK + * per set bit. + */ + for_each_set_bit(bit, isr->map, APIC_IR_BITS) + ack_APIC_irq(); + return true; + } + + return !bitmap_empty(irr->map, APIC_IR_BITS); +} + +/* + * After a crash, we no longer service the interrupts and a pending + * interrupt from previous kernel might still have ISR bit set. + * + * Most probably by now the CPU has serviced that pending interrupt and it + * might not have done the ack_APIC_irq() because it thought, interrupt + * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear + * the ISR bit and cpu thinks it has already serivced the interrupt. Hence + * a vector might get locked. It was noticed for timer irq (vector + * 0x31). Issue an extra EOI to clear ISR. + * + * If there are pending IRR bits they turn into ISR bits after a higher + * priority ISR bit has been acked. + */ +static void apic_pending_intr_clear(void) +{ + union apic_ir irr, isr; + unsigned int i; + + /* 512 loops are way oversized and give the APIC a chance to obey. */ + for (i = 0; i < 512; i++) { + if (!apic_check_and_ack(&irr, &isr)) + return; + } + /* Dump the IRR/ISR content if that failed */ + pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map); } /** @@ -1555,12 +1590,19 @@ static void setup_local_APIC(void) int logical_apicid, ldr_apicid; #endif - if (disable_apic) { disable_ioapic_support(); return; } + /* + * If this comes from kexec/kcrash the APIC might be enabled in + * SPIV. Soft disable it before doing further initialization. + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_SPIV_APIC_ENABLED; + apic_write(APIC_SPIV, value); + #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ if (lapic_is_integrated() && apic->disable_esr) { @@ -1570,8 +1612,6 @@ static void setup_local_APIC(void) apic_write(APIC_ESR, 0); } #endif - perf_events_lapic_init(); - /* * Double-check whether this APIC is really registered. * This is meaningless in clustered apic mode, so we skip it. @@ -1599,13 +1639,17 @@ static void setup_local_APIC(void) #endif /* - * Set Task Priority to 'accept all'. We never change this - * later on. + * Set Task Priority to 'accept all except vectors 0-31'. An APIC + * vector in the 16-31 range could be delivered if TPR == 0, but we + * would think it's an exception and terrible things will happen. We + * never change this later on. */ value = apic_read(APIC_TASKPRI); value &= ~APIC_TPRI_MASK; + value |= 0x10; apic_write(APIC_TASKPRI, value); + /* Clear eventually stale ISR/IRR bits */ apic_pending_intr_clear(); /* @@ -1652,6 +1696,8 @@ static void setup_local_APIC(void) value |= SPURIOUS_APIC_VECTOR; apic_write(APIC_SPIV, value); + perf_events_lapic_init(); + /* * Set up LVT0, LVT1: * diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index bbdca603f94a..7862b152a052 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -8,21 +8,14 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. 
*/ -#include <linux/acpi.h> -#include <linux/errno.h> -#include <linux/threads.h> #include <linux/cpumask.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/ctype.h> -#include <linux/hardirq.h> #include <linux/export.h> +#include <linux/acpi.h> -#include <asm/smp.h> -#include <asm/ipi.h> -#include <asm/apic.h> -#include <asm/apic_flat_64.h> #include <asm/jailhouse_para.h> +#include <asm/apic.h> + +#include "local.h" static struct apic apic_physflat; static struct apic apic_flat; @@ -83,35 +76,6 @@ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) _flat_send_IPI_mask(mask, vector); } -static void flat_send_IPI_allbutself(int vector) -{ - int cpu = smp_processor_id(); - - if (IS_ENABLED(CONFIG_HOTPLUG_CPU) || vector == NMI_VECTOR) { - if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { - unsigned long mask = cpumask_bits(cpu_online_mask)[0]; - - if (cpu < BITS_PER_LONG) - __clear_bit(cpu, &mask); - - _flat_send_IPI_mask(mask, vector); - } - } else if (num_online_cpus() > 1) { - __default_send_IPI_shortcut(APIC_DEST_ALLBUT, - vector, apic->dest_logical); - } -} - -static void flat_send_IPI_all(int vector) -{ - if (vector == NMI_VECTOR) { - flat_send_IPI_mask(cpu_online_mask, vector); - } else { - __default_send_IPI_shortcut(APIC_DEST_ALLINC, - vector, apic->dest_logical); - } -} - static unsigned int flat_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; @@ -173,9 +137,9 @@ static struct apic apic_flat __ro_after_init = { .send_IPI = default_send_IPI_single, .send_IPI_mask = flat_send_IPI_mask, .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, - .send_IPI_allbutself = flat_send_IPI_allbutself, - .send_IPI_all = flat_send_IPI_all, - .send_IPI_self = apic_send_IPI_self, + .send_IPI_allbutself = default_send_IPI_allbutself, + .send_IPI_all = default_send_IPI_all, + .send_IPI_self = default_send_IPI_self, .inquire_remote_apic = default_inquire_remote_apic, @@ -225,16 +189,6 @@ static void physflat_init_apic_ldr(void) */ } -static void physflat_send_IPI_allbutself(int vector) -{ - default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); -} - -static void physflat_send_IPI_all(int vector) -{ - default_send_IPI_mask_sequence_phys(cpu_online_mask, vector); -} - static int physflat_probe(void) { if (apic == &apic_physflat || num_possible_cpus() > 8 || @@ -276,9 +230,9 @@ static struct apic apic_physflat __ro_after_init = { .send_IPI = default_send_IPI_single_phys, .send_IPI_mask = default_send_IPI_mask_sequence_phys, .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_phys, - .send_IPI_allbutself = physflat_send_IPI_allbutself, - .send_IPI_all = physflat_send_IPI_all, - .send_IPI_self = apic_send_IPI_self, + .send_IPI_allbutself = default_send_IPI_allbutself, + .send_IPI_all = default_send_IPI_all, + .send_IPI_self = default_send_IPI_self, .inquire_remote_apic = default_inquire_remote_apic, diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index 5078b5ce63a7..98c9bb75d185 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -9,25 +9,9 @@ * to not uglify the caller's code and allow to call (some) apic routines * like self-ipi, etc... 
*/ - -#include <linux/threads.h> #include <linux/cpumask.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/ctype.h> -#include <linux/errno.h> -#include <asm/fixmap.h> -#include <asm/mpspec.h> -#include <asm/apicdef.h> -#include <asm/apic.h> -#include <asm/setup.h> -#include <linux/smp.h> -#include <asm/ipi.h> - -#include <linux/interrupt.h> -#include <asm/acpi.h> -#include <asm/e820/api.h> +#include <asm/apic.h> static void noop_init_apic_ldr(void) { } static void noop_send_IPI(int cpu, int vector) { } diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index a5464b8b6c46..cdf45b4700f2 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -10,15 +10,15 @@ * Send feedback to <support@numascale.com> * */ - +#include <linux/types.h> #include <linux/init.h> #include <asm/numachip/numachip.h> #include <asm/numachip/numachip_csr.h> -#include <asm/ipi.h> -#include <asm/apic_flat_64.h> + #include <asm/pgtable.h> -#include <asm/pci_x86.h> + +#include "local.h" u8 numachip_system __read_mostly; static const struct apic apic_numachip1; diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index afee386ff711..38b5b51d42f6 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c @@ -4,18 +4,13 @@ * * Drives the local APIC in "clustered mode". */ -#include <linux/threads.h> #include <linux/cpumask.h> -#include <linux/kernel.h> -#include <linux/init.h> #include <linux/dmi.h> #include <linux/smp.h> -#include <asm/apicdef.h> -#include <asm/fixmap.h> -#include <asm/mpspec.h> #include <asm/apic.h> -#include <asm/ipi.h> + +#include "local.h" static unsigned bigsmp_get_apic_id(unsigned long x) { @@ -38,32 +33,12 @@ static int bigsmp_early_logical_apicid(int cpu) return early_per_cpu(x86_cpu_to_apicid, cpu); } -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long val, id; - - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = per_cpu(x86_bios_cpu_apicid, cpu); - val |= SET_APIC_LOGICAL_ID(id); - - return val; -} - /* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... + * bigsmp enables physical destination mode + * and doesn't use LDR and DFR */ static void bigsmp_init_apic_ldr(void) { - unsigned long val; - int cpu = smp_processor_id(); - - apic_write(APIC_DFR, APIC_DFR_FLAT); - val = calculate_ldr(cpu); - apic_write(APIC_LDR, val); } static void bigsmp_setup_apic_routing(void) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index c7bb6c69f21c..d6af97fd170a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2438,7 +2438,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from) * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use * gsi_top if ioapic_dynirq_base hasn't been initialized yet. */ - return ioapic_initialized ? ioapic_dynirq_base : gsi_top; + if (!ioapic_initialized) + return gsi_top; + /* + * For DT enabled machines ioapic_dynirq_base is irrelevant and not + * updated. So simply return @from if ioapic_dynirq_base == 0. + */ + return ioapic_dynirq_base ? 
: from; } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 82f9244fe61f..6ca0f91372fd 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -1,24 +1,113 @@ // SPDX-License-Identifier: GPL-2.0 + #include <linux/cpumask.h> -#include <linux/interrupt.h> - -#include <linux/mm.h> -#include <linux/delay.h> -#include <linux/spinlock.h> -#include <linux/kernel_stat.h> -#include <linux/mc146818rtc.h> -#include <linux/cache.h> -#include <linux/cpu.h> - -#include <asm/smp.h> -#include <asm/mtrr.h> -#include <asm/tlbflush.h> -#include <asm/mmu_context.h> -#include <asm/apic.h> -#include <asm/proto.h> -#include <asm/ipi.h> - -void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) +#include <linux/smp.h> + +#include "local.h" + +DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand); + +#ifdef CONFIG_SMP +static int apic_ipi_shorthand_off __ro_after_init; + +static __init int apic_ipi_shorthand(char *str) +{ + get_option(&str, &apic_ipi_shorthand_off); + return 1; +} +__setup("no_ipi_broadcast=", apic_ipi_shorthand); + +static int __init print_ipi_mode(void) +{ + pr_info("IPI shorthand broadcast: %s\n", + apic_ipi_shorthand_off ? "disabled" : "enabled"); + return 0; +} +late_initcall(print_ipi_mode); + +void apic_smt_update(void) +{ + /* + * Do not switch to broadcast mode if: + * - Disabled on the command line + * - Only a single CPU is online + * - Not all present CPUs have been at least booted once + * + * The latter is important as the local APIC might be in some + * random state and a broadcast might cause havoc. That's + * especially true for NMI broadcasting. + */ + if (apic_ipi_shorthand_off || num_online_cpus() == 1 || + !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) { + static_branch_disable(&apic_use_ipi_shorthand); + } else { + static_branch_enable(&apic_use_ipi_shorthand); + } +} + +void apic_send_IPI_allbutself(unsigned int vector) +{ + if (num_online_cpus() < 2) + return; + + if (static_branch_likely(&apic_use_ipi_shorthand)) + apic->send_IPI_allbutself(vector); + else + apic->send_IPI_mask_allbutself(cpu_online_mask, vector); +} + +/* + * Send a 'reschedule' IPI to another CPU. It goes straight through and + * wastes no time serializing anything. Worst case is that we lose a + * reschedule ... + */ +void native_smp_send_reschedule(int cpu) +{ + if (unlikely(cpu_is_offline(cpu))) { + WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu); + return; + } + apic->send_IPI(cpu, RESCHEDULE_VECTOR); +} + +void native_send_call_func_single_ipi(int cpu) +{ + apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR); +} + +void native_send_call_func_ipi(const struct cpumask *mask) +{ + if (static_branch_likely(&apic_use_ipi_shorthand)) { + unsigned int cpu = smp_processor_id(); + + if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask)) + goto sendmask; + + if (cpumask_test_cpu(cpu, mask)) + apic->send_IPI_all(CALL_FUNCTION_VECTOR); + else if (num_online_cpus() > 1) + apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR); + return; + } + +sendmask: + apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); +} + +#endif /* CONFIG_SMP */ + +static inline int __prepare_ICR2(unsigned int mask) +{ + return SET_APIC_DEST_FIELD(mask); +} + +static inline void __xapic_wait_icr_idle(void) +{ + while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) + cpu_relax(); +} + +void __default_send_IPI_shortcut(unsigned int shortcut, int vector) { /* * Subtle. 
In the case of the 'never do double writes' workaround @@ -32,12 +121,16 @@ void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int /* * Wait for idle. */ - __xapic_wait_icr_idle(); + if (unlikely(vector == NMI_VECTOR)) + safe_apic_wait_icr_idle(); + else + __xapic_wait_icr_idle(); /* - * No need to touch the target chip field + * No need to touch the target chip field. Also the destination + * mode is ignored when a shorthand is used. */ - cfg = __prepare_ICR(shortcut, vector, dest); + cfg = __prepare_ICR(shortcut, vector, 0); /* * Send the IPI. The write to APIC_ICR fires this off. @@ -133,6 +226,21 @@ void default_send_IPI_single(int cpu, int vector) apic->send_IPI_mask(cpumask_of(cpu), vector); } +void default_send_IPI_allbutself(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector); +} + +void default_send_IPI_all(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector); +} + +void default_send_IPI_self(int vector) +{ + __default_send_IPI_shortcut(APIC_DEST_SELF, vector); +} + #ifdef CONFIG_X86_32 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, @@ -192,28 +300,6 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) local_irq_restore(flags); } -void default_send_IPI_allbutself(int vector) -{ - /* - * if there are no other CPUs in the system then we get an APIC send - * error if we try to broadcast, thus avoid sending IPIs in this case. - */ - if (!(num_online_cpus() > 1)) - return; - - __default_local_send_IPI_allbutself(vector); -} - -void default_send_IPI_all(int vector) -{ - __default_local_send_IPI_all(vector); -} - -void default_send_IPI_self(int vector) -{ - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); -} - /* must come after the send_IPI functions above for inlining */ static int convert_apicid_to_cpu(int apic_id) { diff --git a/arch/x86/kernel/apic/local.h b/arch/x86/kernel/apic/local.h new file mode 100644 index 000000000000..04797f05ce94 --- /dev/null +++ b/arch/x86/kernel/apic/local.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Historical copyright notices: + * + * Copyright 2004 James Cleverdon, IBM. + * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> + * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> + * (c) 2002,2003 Andi Kleen, SuSE Labs. + */ + +#include <linux/jump_label.h> + +#include <asm/apic.h> + +/* APIC flat 64 */ +void flat_init_apic_ldr(void); + +/* X2APIC */ +int x2apic_apic_id_valid(u32 apicid); +int x2apic_apic_id_registered(void); +void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest); +unsigned int x2apic_get_apic_id(unsigned long id); +u32 x2apic_set_apic_id(unsigned int id); +int x2apic_phys_pkg_id(int initial_apicid, int index_msb); +void x2apic_send_IPI_self(int vector); +void __x2apic_send_IPI_shorthand(int vector, u32 which); + +/* IPI */ + +DECLARE_STATIC_KEY_FALSE(apic_use_ipi_shorthand); + +static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, + unsigned int dest) +{ + unsigned int icr = shortcut | dest; + + switch (vector) { + default: + icr |= APIC_DM_FIXED | vector; + break; + case NMI_VECTOR: + icr |= APIC_DM_NMI; + break; + } + return icr; +} + +void __default_send_IPI_shortcut(unsigned int shortcut, int vector); + +/* + * This is used to send an IPI with no shorthand notation (the destination is + * specified in bits 56 to 63 of the ICR). 
+ */ +void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest); + +void default_send_IPI_single(int cpu, int vector); +void default_send_IPI_single_phys(int cpu, int vector); +void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector); +void default_send_IPI_allbutself(int vector); +void default_send_IPI_all(int vector); +void default_send_IPI_self(int vector); + +#ifdef CONFIG_X86_32 +void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector); +void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector); +void default_send_IPI_mask_logical(const struct cpumask *mask, int vector); +#endif diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index ee2d91e382f1..67b33d67002f 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -6,51 +6,14 @@ * * Generic x86 APIC driver probe layer. */ -#include <linux/threads.h> -#include <linux/cpumask.h> #include <linux/export.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/ctype.h> -#include <linux/init.h> #include <linux/errno.h> -#include <asm/fixmap.h> -#include <asm/mpspec.h> -#include <asm/apicdef.h> -#include <asm/apic.h> -#include <asm/setup.h> - #include <linux/smp.h> -#include <asm/ipi.h> -#include <linux/interrupt.h> +#include <asm/apic.h> #include <asm/acpi.h> -#include <asm/e820/api.h> -#ifdef CONFIG_HOTPLUG_CPU -#define DEFAULT_SEND_IPI (1) -#else -#define DEFAULT_SEND_IPI (0) -#endif - -int no_broadcast = DEFAULT_SEND_IPI; - -static __init int no_ipi_broadcast(char *str) -{ - get_option(&str, &no_broadcast); - pr_info("Using %s mode\n", - no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); - return 1; -} -__setup("no_ipi_broadcast=", no_ipi_broadcast); - -static int __init print_ipi_mode(void) -{ - pr_info("Using IPI %s mode\n", - no_broadcast ? "No-Shortcut" : "Shortcut"); - return 0; -} -late_initcall(print_ipi_mode); +#include "local.h" static int default_x86_32_early_logical_apicid(int cpu) { diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index e6560a02eb46..29f0e0984557 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -8,19 +8,9 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ -#include <linux/threads.h> -#include <linux/cpumask.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/ctype.h> -#include <linux/hardirq.h> -#include <linux/dmar.h> - -#include <asm/smp.h> #include <asm/apic.h> -#include <asm/ipi.h> -#include <asm/setup.h> + +#include "local.h" /* * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. @@ -46,13 +36,6 @@ void __init default_setup_apic_routing(void) x86_platform.apic_post_init(); } -/* Same for both flat and physical. 
*/ - -void apic_send_IPI_self(int vector) -{ - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); -} - int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { struct apic **drv; diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index fdacb864c3dd..2c5676b0a6e7 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -398,6 +398,17 @@ static int activate_reserved(struct irq_data *irqd) if (!irqd_can_reserve(irqd)) apicd->can_reserve = false; } + + /* + * Check to ensure that the effective affinity mask is a subset + * the user supplied affinity mask, and warn the user if it is not + */ + if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd), + irq_data_get_affinity_mask(irqd))) { + pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n", + irqd->irq); + } + return ret; } diff --git a/arch/x86/kernel/apic/x2apic.h b/arch/x86/kernel/apic/x2apic.h deleted file mode 100644 index a49b3604027f..000000000000 --- a/arch/x86/kernel/apic/x2apic.h +++ /dev/null @@ -1,9 +0,0 @@ -/* Common bits for X2APIC cluster/physical modes. */ - -int x2apic_apic_id_valid(u32 apicid); -int x2apic_apic_id_registered(void); -void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest); -unsigned int x2apic_get_apic_id(unsigned long id); -u32 x2apic_set_apic_id(unsigned int id); -int x2apic_phys_pkg_id(int initial_apicid, int index_msb); -void x2apic_send_IPI_self(int vector); diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 609e499387a1..45e92cba92f5 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -1,15 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/threads.h> + +#include <linux/cpuhotplug.h> #include <linux/cpumask.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/ctype.h> -#include <linux/dmar.h> -#include <linux/irq.h> -#include <linux/cpu.h> +#include <linux/slab.h> +#include <linux/mm.h> + +#include <asm/apic.h> -#include <asm/smp.h> -#include "x2apic.h" +#include "local.h" struct cluster_mask { unsigned int clusterid; @@ -84,12 +82,12 @@ x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) static void x2apic_send_IPI_allbutself(int vector) { - __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT); + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT); } static void x2apic_send_IPI_all(int vector) { - __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC); + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC); } static u32 x2apic_calc_apicid(unsigned int cpu) diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index b5cf9e7b3830..bc9693841353 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -1,14 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/threads.h> + #include <linux/cpumask.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/ctype.h> -#include <linux/dmar.h> +#include <linux/acpi.h> -#include <asm/smp.h> -#include <asm/ipi.h> -#include "x2apic.h" +#include "local.h" int x2apic_phys; @@ -80,12 +75,12 @@ static void static void x2apic_send_IPI_allbutself(int vector) { - __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT); + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT); } static void x2apic_send_IPI_all(int vector) { - __x2apic_send_IPI_mask(cpu_online_mask, vector, 
APIC_DEST_ALLINC); + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC); } static void init_x2apic_ldr(void) @@ -117,6 +112,14 @@ void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) native_x2apic_icr_write(cfg, apicid); } +void __x2apic_send_IPI_shorthand(int vector, u32 which) +{ + unsigned long cfg = __prepare_ICR(which, vector, 0); + + x2apic_wrmsr_fence(); + native_x2apic_icr_write(cfg, 0); +} + unsigned int x2apic_get_apic_id(unsigned long id) { return id; diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 1e225528f0d7..e6230af19864 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -7,42 +7,22 @@ * * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved. */ +#include <linux/crash_dump.h> +#include <linux/cpuhotplug.h> #include <linux/cpumask.h> -#include <linux/hardirq.h> #include <linux/proc_fs.h> -#include <linux/threads.h> -#include <linux/kernel.h> +#include <linux/memory.h> #include <linux/export.h> -#include <linux/string.h> -#include <linux/ctype.h> -#include <linux/sched.h> -#include <linux/timer.h> -#include <linux/slab.h> -#include <linux/cpu.h> -#include <linux/init.h> -#include <linux/io.h> #include <linux/pci.h> -#include <linux/kdebug.h> -#include <linux/delay.h> -#include <linux/crash_dump.h> -#include <linux/reboot.h> -#include <linux/memory.h> -#include <linux/numa.h> +#include <asm/e820/api.h> #include <asm/uv/uv_mmrs.h> #include <asm/uv/uv_hub.h> -#include <asm/current.h> -#include <asm/pgtable.h> #include <asm/uv/bios.h> #include <asm/uv/uv.h> #include <asm/apic.h> -#include <asm/e820/api.h> -#include <asm/ipi.h> -#include <asm/smp.h> -#include <asm/x86_init.h> -#include <asm/nmi.h> -DEFINE_PER_CPU(int, x2apic_extra_bits); +static DEFINE_PER_CPU(int, x2apic_extra_bits); static enum uv_system_type uv_system_type; static bool uv_hubless_system; diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index d3d075226c0a..24d2fde30d00 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -6,13 +6,28 @@ #include <asm/ia32.h> #define __SYSCALL_64(nr, sym, qual) [nr] = 1, +#define __SYSCALL_X32(nr, sym, qual) static char syscalls_64[] = { #include <asm/syscalls_64.h> }; +#undef __SYSCALL_64 +#undef __SYSCALL_X32 + +#ifdef CONFIG_X86_X32_ABI +#define __SYSCALL_64(nr, sym, qual) +#define __SYSCALL_X32(nr, sym, qual) [nr] = 1, +static char syscalls_x32[] = { +#include <asm/syscalls_64.h> +}; +#undef __SYSCALL_64 +#undef __SYSCALL_X32 +#endif + #define __SYSCALL_I386(nr, sym, qual) [nr] = 1, static char syscalls_ia32[] = { #include <asm/syscalls_32.h> }; +#undef __SYSCALL_I386 #if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS) #include <asm/kvm_para.h> @@ -62,7 +77,6 @@ int main(void) ENTRY(cr2); ENTRY(cr3); ENTRY(cr4); - ENTRY(cr8); ENTRY(gdt_desc); BLANK(); #undef ENTRY @@ -80,6 +94,11 @@ int main(void) DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1); DEFINE(NR_syscalls, sizeof(syscalls_64)); +#ifdef CONFIG_X86_X32_ABI + DEFINE(__NR_syscall_x32_max, sizeof(syscalls_x32) - 1); + DEFINE(X32_NR_syscalls, sizeof(syscalls_x32)); +#endif + DEFINE(__NR_syscall_compat_max, sizeof(syscalls_ia32) - 1); DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32)); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 68c363c341bf..90f75e515876 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -8,6 +8,7 @@ #include <linux/sched.h> #include 
<linux/sched/clock.h> #include <linux/random.h> +#include <linux/topology.h> #include <asm/processor.h> #include <asm/apic.h> #include <asm/cacheinfo.h> @@ -889,6 +890,10 @@ static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); +#ifdef CONFIG_NUMA + node_reclaim_distance = 32; +#endif + /* * Fix erratum 1076: CPB feature bit not being set in CPUID. * Always set it, except when running under a hypervisor. @@ -945,12 +950,8 @@ static void init_amd(struct cpuinfo_x86 *c) init_amd_cacheinfo(c); if (cpu_has(c, X86_FEATURE_XMM2)) { - unsigned long long val; - int ret; - /* - * A serializing LFENCE has less overhead than MFENCE, so - * use it for execution serialization. On families which + * Use LFENCE for execution serialization. On families which * don't have that MSR, LFENCE is already serializing. * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. @@ -958,19 +959,8 @@ static void init_amd(struct cpuinfo_x86 *c) msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); - /* - * Verify that the MSR write was successful (could be running - * under a hypervisor) and only then assume that LFENCE is - * serializing. - */ - ret = rdmsrl_safe(MSR_F10H_DECFG, &val); - if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { - /* A serializing LFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); - } else { - /* MFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); - } + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); } /* diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c6fa3ef10b4e..91c2561b905f 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -787,7 +787,7 @@ static void update_mds_branch_idle(void) #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" -void arch_smt_update(void) +void cpu_bugs_smt_update(void) { /* Enhanced IBRS implies STIBP. No update required. 
*/ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) @@ -1184,15 +1184,15 @@ static void override_cache_bits(struct cpuinfo_x86 *c) case INTEL_FAM6_WESTMERE: case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: - case INTEL_FAM6_HASWELL_CORE: - case INTEL_FAM6_HASWELL_ULT: - case INTEL_FAM6_HASWELL_GT3E: - case INTEL_FAM6_BROADWELL_CORE: - case INTEL_FAM6_BROADWELL_GT3E: - case INTEL_FAM6_SKYLAKE_MOBILE: - case INTEL_FAM6_SKYLAKE_DESKTOP: - case INTEL_FAM6_KABYLAKE_MOBILE: - case INTEL_FAM6_KABYLAKE_DESKTOP: + case INTEL_FAM6_HASWELL: + case INTEL_FAM6_HASWELL_L: + case INTEL_FAM6_HASWELL_G: + case INTEL_FAM6_BROADWELL: + case INTEL_FAM6_BROADWELL_G: + case INTEL_FAM6_SKYLAKE_L: + case INTEL_FAM6_SKYLAKE: + case INTEL_FAM6_KABYLAKE_L: + case INTEL_FAM6_KABYLAKE: if (c->x86_cache_bits < 44) c->x86_cache_bits = 44; break; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f125bf7ecb6f..9ae7d1bcd4f4 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1050,7 +1050,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), - VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), + VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS), @@ -1059,9 +1059,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(CORE_YONAH, NO_SSB), VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS), + VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS), VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS), - VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS), + VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS), VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS), /* @@ -1957,3 +1958,14 @@ void microcode_check(void) pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); } + +/* + * Invoked from core CPU hotplug code after hotplug operations + */ +void arch_smt_update(void) +{ + /* Handle the speculative execution misfeatures */ + cpu_bugs_smt_update(); + /* Check whether IPI broadcasting can be enabled */ + apic_smt_update(); +} diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index b5353244749b..3cbe24ca80ab 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -20,54 +20,55 @@ struct cpuid_dep { * but it's difficult to tell that to the init reference checker. 
*/ static const struct cpuid_dep cpuid_deps[] = { - { X86_FEATURE_FXSR, X86_FEATURE_FPU }, - { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, - { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, - { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, - { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, - { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, - { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, - { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, - { X86_FEATURE_CMOV, X86_FEATURE_FXSR }, - { X86_FEATURE_MMX, X86_FEATURE_FXSR }, - { X86_FEATURE_MMXEXT, X86_FEATURE_MMX }, - { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, - { X86_FEATURE_XSAVE, X86_FEATURE_FXSR }, - { X86_FEATURE_XMM, X86_FEATURE_FXSR }, - { X86_FEATURE_XMM2, X86_FEATURE_XMM }, - { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, - { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, - { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, - { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, - { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, - { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, - { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, - { X86_FEATURE_AES, X86_FEATURE_XMM2 }, - { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, - { X86_FEATURE_FMA, X86_FEATURE_AVX }, - { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, - { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, - { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, - { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, - { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, - { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, - { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, - { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, - { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, - { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, - { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC }, - { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, - { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, - { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, + { X86_FEATURE_FXSR, X86_FEATURE_FPU }, + { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, + { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, + { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, + { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, + { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, + { X86_FEATURE_CMOV, X86_FEATURE_FXSR }, + { X86_FEATURE_MMX, X86_FEATURE_FXSR }, + { X86_FEATURE_MMXEXT, X86_FEATURE_MMX }, + { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, + { X86_FEATURE_XSAVE, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM2, X86_FEATURE_XMM }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, + { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, + { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, + { X86_FEATURE_AES, X86_FEATURE_XMM2 }, + { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, + { X86_FEATURE_FMA, X86_FEATURE_AVX }, + { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512PF, 
X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, + { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VP2INTERSECT, X86_FEATURE_AVX512VL }, + { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, + { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, {} }; diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 415621ddb8a2..4e28c1fc8749 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -330,12 +330,8 @@ static void init_hygon(struct cpuinfo_x86 *c) init_hygon_cacheinfo(c); if (cpu_has(c, X86_FEATURE_XMM2)) { - unsigned long long val; - int ret; - /* - * A serializing LFENCE has less overhead than MFENCE, so - * use it for execution serialization. On families which + * Use LFENCE for execution serialization. On families which * don't have that MSR, LFENCE is already serializing. * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. @@ -343,19 +339,8 @@ static void init_hygon(struct cpuinfo_x86 *c) msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); - /* - * Verify that the MSR write was successful (could be running - * under a hypervisor) and only then assume that LFENCE is - * serializing. 
- */ - ret = rdmsrl_safe(MSR_F10H_DECFG, &val); - if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { - /* A serializing LFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); - } else { - /* MFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); - } + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); } /* diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 8d6d92ebeb54..c2fdc00df163 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -142,21 +142,21 @@ struct sku_microcode { u32 microcode; }; static const struct sku_microcode spectre_bad_microcodes[] = { - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 }, + { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 }, { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, - { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, - { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, - { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, - { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, + { INTEL_FAM6_BROADWELL, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 }, + { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 }, { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, - { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, - { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, - { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, + { INTEL_FAM6_HASWELL_L, 0x01, 0x21 }, + { INTEL_FAM6_HASWELL_G, 0x01, 0x18 }, + { INTEL_FAM6_HASWELL, 0x03, 0x23 }, { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, @@ -265,9 +265,10 @@ static void early_init_intel(struct cpuinfo_x86 *c) /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ if (c->x86 == 6) { switch (c->x86_model) { - case 0x27: /* Penwell */ - case 0x35: /* Cloverview */ - case 0x4a: /* Merrifield */ + case INTEL_FAM6_ATOM_SALTWELL_MID: + case INTEL_FAM6_ATOM_SALTWELL_TABLET: + case INTEL_FAM6_ATOM_SILVERMONT_MID: + case INTEL_FAM6_ATOM_AIRMONT_NP: set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); break; default: diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index e43eb6732630..88cd9598fa57 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -479,7 +479,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) switch (c->x86_model) { case INTEL_FAM6_IVYBRIDGE_X: case INTEL_FAM6_HASWELL_X: - case INTEL_FAM6_BROADWELL_XEON_D: + case INTEL_FAM6_BROADWELL_D: case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_XEON_PHI_KNL: diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index 210f1f5db5f7..87bcdc6dc2f0 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -107,11 +107,11 @@ static struct severity { */ MCESEV( AO, "Action optional: memory scrubbing error", - SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB) + SER, MASK(MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB) ), MCESEV( AO, "Action optional: last level cache writeback error", - SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD, 
MCI_STATUS_UC|MCACOD_L3WB) + SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB) ), /* ignore OVER for UCNA */ diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 062f77279ce3..267daad8c036 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -29,6 +29,7 @@ #include <asm/timer.h> #include <asm/reboot.h> #include <asm/nmi.h> +#include <clocksource/hyperv_timer.h> struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); @@ -338,6 +339,15 @@ static void __init ms_hyperv_init_platform(void) x2apic_phys = 1; # endif + /* Register Hyper-V specific clocksource */ + hv_init_clocksource(); +#endif +} + +void hv_setup_sched_clock(void *sched_clock) +{ +#ifdef CONFIG_PARAVIRT + pv_ops.time.sched_clock = sched_clock; #endif } diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 3c648476d4fb..9735139cfdf8 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -30,34 +30,69 @@ #include <asm/hypervisor.h> #include <asm/timer.h> #include <asm/apic.h> +#include <asm/vmware.h> #undef pr_fmt #define pr_fmt(fmt) "vmware: " fmt -#define CPUID_VMWARE_INFO_LEAF 0x40000000 +#define CPUID_VMWARE_INFO_LEAF 0x40000000 +#define CPUID_VMWARE_FEATURES_LEAF 0x40000010 +#define CPUID_VMWARE_FEATURES_ECX_VMMCALL BIT(0) +#define CPUID_VMWARE_FEATURES_ECX_VMCALL BIT(1) + #define VMWARE_HYPERVISOR_MAGIC 0x564D5868 -#define VMWARE_HYPERVISOR_PORT 0x5658 -#define VMWARE_PORT_CMD_GETVERSION 10 -#define VMWARE_PORT_CMD_GETHZ 45 -#define VMWARE_PORT_CMD_GETVCPU_INFO 68 -#define VMWARE_PORT_CMD_LEGACY_X2APIC 3 -#define VMWARE_PORT_CMD_VCPU_RESERVED 31 +#define VMWARE_CMD_GETVERSION 10 +#define VMWARE_CMD_GETHZ 45 +#define VMWARE_CMD_GETVCPU_INFO 68 +#define VMWARE_CMD_LEGACY_X2APIC 3 +#define VMWARE_CMD_VCPU_RESERVED 31 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ __asm__("inl (%%dx)" : \ - "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ - "0"(VMWARE_HYPERVISOR_MAGIC), \ - "1"(VMWARE_PORT_CMD_##cmd), \ - "2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \ - "memory"); + "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ + "a"(VMWARE_HYPERVISOR_MAGIC), \ + "c"(VMWARE_CMD_##cmd), \ + "d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) : \ + "memory") + +#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx) \ + __asm__("vmcall" : \ + "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ + "a"(VMWARE_HYPERVISOR_MAGIC), \ + "c"(VMWARE_CMD_##cmd), \ + "d"(0), "b"(UINT_MAX) : \ + "memory") + +#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx) \ + __asm__("vmmcall" : \ + "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ + "a"(VMWARE_HYPERVISOR_MAGIC), \ + "c"(VMWARE_CMD_##cmd), \ + "d"(0), "b"(UINT_MAX) : \ + "memory") + +#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do { \ + switch (vmware_hypercall_mode) { \ + case CPUID_VMWARE_FEATURES_ECX_VMCALL: \ + VMWARE_VMCALL(cmd, eax, ebx, ecx, edx); \ + break; \ + case CPUID_VMWARE_FEATURES_ECX_VMMCALL: \ + VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx); \ + break; \ + default: \ + VMWARE_PORT(cmd, eax, ebx, ecx, edx); \ + break; \ + } \ + } while (0) static unsigned long vmware_tsc_khz __ro_after_init; +static u8 vmware_hypercall_mode __ro_after_init; static inline int __vmware_platform(void) { uint32_t eax, ebx, ecx, edx; - VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx); + VMWARE_CMD(GETVERSION, eax, ebx, ecx, edx); return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC; } @@ -129,6 +164,10 @@ static void __init vmware_set_capabilities(void) { setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC); 
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); + if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL) + setup_force_cpu_cap(X86_FEATURE_VMCALL); + else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL) + setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL); } static void __init vmware_platform_setup(void) @@ -136,7 +175,7 @@ static void __init vmware_platform_setup(void) uint32_t eax, ebx, ecx, edx; uint64_t lpj, tsc_khz; - VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); + VMWARE_CMD(GETHZ, eax, ebx, ecx, edx); if (ebx != UINT_MAX) { lpj = tsc_khz = eax | (((uint64_t)ebx) << 32); @@ -174,10 +213,21 @@ static void __init vmware_platform_setup(void) vmware_set_capabilities(); } +static u8 vmware_select_hypercall(void) +{ + int eax, ebx, ecx, edx; + + cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx); + return (ecx & (CPUID_VMWARE_FEATURES_ECX_VMMCALL | + CPUID_VMWARE_FEATURES_ECX_VMCALL)); +} + /* * While checking the dmi string information, just checking the product * serial key should be enough, as this will always have a VMware * specific string when running under VMware hypervisor. + * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode + * intentionally defaults to 0. */ static uint32_t __init vmware_platform(void) { @@ -187,8 +237,16 @@ static uint32_t __init vmware_platform(void) cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0], &hyper_vendor_id[1], &hyper_vendor_id[2]); - if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) + if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) { + if (eax >= CPUID_VMWARE_FEATURES_LEAF) + vmware_hypercall_mode = + vmware_select_hypercall(); + + pr_info("hypercall mode: 0x%02x\n", + (unsigned int) vmware_hypercall_mode); + return CPUID_VMWARE_INFO_LEAF; + } } else if (dmi_available && dmi_name_in_serial("VMware") && __vmware_platform()) return 1; @@ -200,9 +258,9 @@ static uint32_t __init vmware_platform(void) static bool __init vmware_legacy_x2apic_available(void) { uint32_t eax, ebx, ecx, edx; - VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx); - return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 && - (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; + VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx); + return (eax & (1 << VMWARE_CMD_VCPU_RESERVED)) == 0 && + (eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0; } const __initconst struct hypervisor_x86 x86_hyper_vmware = { diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 2bf70a2fed90..eb651fbde92a 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -225,8 +225,6 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem) if (crashk_low_res.end) { ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); - if (ret) - return ret; } return ret; diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 2b5886401e5f..e07424e19274 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -367,13 +367,18 @@ NOKPROBE_SYMBOL(oops_end); int __die(const char *str, struct pt_regs *regs, long err) { + const char *pr = ""; + /* Save the regs of the first oops for the executive summary later. */ if (!die_counter) exec_summary_regs = *regs; + if (IS_ENABLED(CONFIG_PREEMPTION)) + pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; + printk(KERN_DEFAULT "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, - IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", + pr, IS_ENABLED(CONFIG_SMP) ? " SMP" : "", debug_pagealloc_enabled() ? 
" DEBUG_PAGEALLOC" : "", IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "", diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 4215653f8a8e..21efee32e2b1 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -243,11 +243,15 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU"); desc = __this_cpu_read(vector_irq[vector]); - - if (!handle_irq(desc, regs)) { + if (likely(!IS_ERR_OR_NULL(desc))) { + if (IS_ENABLED(CONFIG_X86_32)) + handle_irq(desc, regs); + else + generic_handle_irq_desc(desc); + } else { ack_APIC_irq(); - if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) { + if (desc == VECTOR_UNUSED) { pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n", __func__, smp_processor_id(), vector); diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index fc34816c6f04..a759ca97cd01 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -148,18 +148,13 @@ void do_softirq_own_stack(void) call_on_stack(__do_softirq, isp); } -bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) +void handle_irq(struct irq_desc *desc, struct pt_regs *regs) { int overflow = check_stack_overflow(); - if (IS_ERR_OR_NULL(desc)) - return false; - if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) { if (unlikely(overflow)) print_stack_overflow(); generic_handle_irq_desc(desc); } - - return true; } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 6bf6517a05bb..12df3a4abfdd 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -26,15 +26,6 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible; DECLARE_INIT_PER_CPU(irq_stack_backing_store); -bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) -{ - if (IS_ERR_OR_NULL(desc)) - return false; - - generic_handle_irq_desc(desc); - return true; -} - #ifdef CONFIG_VMAP_STACK /* * VMAP the backing store with guard pages diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 23297ea64f5f..c44fe7d8d9a4 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -416,7 +416,7 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs) */ void kgdb_roundup_cpus(void) { - apic->send_IPI_allbutself(APIC_DM_NMI); + apic_send_IPI_allbutself(NMI_VECTOR); } #endif diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 0e0b08008b5a..43fc13c831af 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -580,7 +580,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, if (setup_detour_execution(p, regs, reenter)) return; -#if !defined(CONFIG_PREEMPT) +#if !defined(CONFIG_PREEMPTION) if (p->ainsn.boostable && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ if (!reenter) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 9d4aedece363..b348dd506d58 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -403,7 +403,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, (u8 *)op->kp.addr + op->optinsn.size); len += RELATIVEJUMP_SIZE; - /* We have to use text_poke for instuction buffer because it is RO */ + /* We have to use text_poke() for instruction buffer because it is RO */ text_poke(slot, buf, len); ret = 0; out: diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 4ab377c9fffe..e820568ed4d5 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ 
-311,7 +311,7 @@ static void kvm_guest_cpu_init(void) if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION pa |= KVM_ASYNC_PF_SEND_ALWAYS; #endif pa |= KVM_ASYNC_PF_ENABLED; @@ -502,16 +502,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) __send_ipi_mask(local_mask, vector); } -static void kvm_send_ipi_allbutself(int vector) -{ - kvm_send_ipi_mask_allbutself(cpu_online_mask, vector); -} - -static void kvm_send_ipi_all(int vector) -{ - __send_ipi_mask(cpu_online_mask, vector); -} - /* * Set the IPI entry points */ @@ -519,8 +509,6 @@ static void kvm_setup_pv_ipi(void) { apic->send_IPI_mask = kvm_send_ipi_mask; apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself; - apic->send_IPI_allbutself = kvm_send_ipi_allbutself; - apic->send_IPI_all = kvm_send_ipi_all; pr_info("KVM setup pv IPIs\n"); } @@ -705,6 +693,7 @@ unsigned int kvm_arch_para_hints(void) { return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES); } +EXPORT_SYMBOL_GPL(kvm_arch_para_hints); static uint32_t __init kvm_detect(void) { @@ -867,3 +856,39 @@ void __init kvm_spinlock_init(void) } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ + +#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL + +static void kvm_disable_host_haltpoll(void *i) +{ + wrmsrl(MSR_KVM_POLL_CONTROL, 0); +} + +static void kvm_enable_host_haltpoll(void *i) +{ + wrmsrl(MSR_KVM_POLL_CONTROL, 1); +} + +void arch_haltpoll_enable(unsigned int cpu) +{ + if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) { + pr_err_once("kvm: host does not support poll control\n"); + pr_err_once("kvm: host upgrade recommended\n"); + return; + } + + /* Enable guest halt poll disables host halt poll */ + smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1); +} +EXPORT_SYMBOL_GPL(arch_haltpoll_enable); + +void arch_haltpoll_disable(unsigned int cpu) +{ + if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) + return; + + /* Enable guest halt poll disables host halt poll */ + smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1); +} +EXPORT_SYMBOL_GPL(arch_haltpoll_disable); +#endif diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 77854b192fef..7b45e8daad22 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -148,7 +148,7 @@ int machine_kexec_prepare(struct kimage *image) { int error; - set_pages_x(image->control_code_page, 1); + set_memory_x((unsigned long)page_address(image->control_code_page), 1); error = machine_kexec_alloc_page_tables(image); if (error) return error; @@ -162,7 +162,7 @@ int machine_kexec_prepare(struct kimage *image) */ void machine_kexec_cleanup(struct kimage *image) { - set_pages_nx(image->control_code_page, 1); + set_memory_nx((unsigned long)page_address(image->control_code_page), 1); machine_kexec_free_page_tables(image); } diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 4df7705022b9..e676a9916c49 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -512,6 +512,9 @@ NOKPROBE_SYMBOL(is_debug_stack); dotraplinkage notrace void do_nmi(struct pt_regs *regs, long error_code) { + if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id())) + return; + if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); return; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 0aa6256eedd8..59d3d2763a9e 100644 --- a/arch/x86/kernel/paravirt.c +++ 
b/arch/x86/kernel/paravirt.c @@ -311,10 +311,6 @@ struct paravirt_patch_template pv_ops = { .cpu.read_cr0 = native_read_cr0, .cpu.write_cr0 = native_write_cr0, .cpu.write_cr4 = native_write_cr4, -#ifdef CONFIG_X86_64 - .cpu.read_cr8 = native_read_cr8, - .cpu.write_cr8 = native_write_cr8, -#endif .cpu.wbinvd = native_wbinvd, .cpu.read_msr = native_read_msr, .cpu.write_msr = native_write_msr, diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index f62b498b18fb..fa4352dce491 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/dma-direct.h> #include <linux/dma-debug.h> +#include <linux/iommu.h> #include <linux/dmar.h> #include <linux/export.h> #include <linux/memblock.h> @@ -34,21 +35,6 @@ int no_iommu __read_mostly; /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly = 0; -/* - * This variable becomes 1 if iommu=pt is passed on the kernel command line. - * If this variable is 1, IOMMU implementations do no DMA translation for - * devices and allow every device to access to whole physical memory. This is - * useful if a user wants to use an IOMMU only for KVM device assignment to - * guests and not for driver dma translation. - * It is also possible to disable by default in kernel config, and enable with - * iommu=nopt at boot time. - */ -#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH -int iommu_pass_through __read_mostly = 1; -#else -int iommu_pass_through __read_mostly; -#endif - extern struct iommu_table_entry __iommu_table[], __iommu_table_end[]; void __init pci_iommu_alloc(void) @@ -120,9 +106,9 @@ static __init int iommu_setup(char *p) swiotlb = 1; #endif if (!strncmp(p, "pt", 2)) - iommu_pass_through = 1; + iommu_set_default_passthrough(true); if (!strncmp(p, "nopt", 4)) - iommu_pass_through = 0; + iommu_set_default_translated(true); gart_parse_options(p); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 75fea0d48c0e..5e94c4354d4e 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -580,7 +580,7 @@ void __cpuidle default_idle(void) safe_halt(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } -#ifdef CONFIG_APM_MODULE +#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE) EXPORT_SYMBOL(default_idle); #endif diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 8451f38ad399..1daf8f2aa21f 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -90,8 +90,6 @@ static void ich_force_hpet_resume(void) BUG(); else printk(KERN_DEBUG "Force enabled HPET at resume\n"); - - return; } static void ich_force_enable_hpet(struct pci_dev *dev) @@ -448,7 +446,6 @@ static void nvidia_force_enable_hpet(struct pci_dev *dev) dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", force_hpet_address); cached_dev = dev; - return; } /* ISA Bridges */ @@ -513,7 +510,6 @@ static void e6xx_force_enable_hpet(struct pci_dev *dev) force_hpet_resume_type = NONE_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); - return; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU, e6xx_force_enable_hpet); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 09d6bded3c1e..0cc7c0b106bb 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -828,11 +828,6 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) return NMI_HANDLED; } -static 
void smp_send_nmi_allbutself(void) -{ - apic->send_IPI_allbutself(NMI_VECTOR); -} - /* * Halt all other CPUs, calling the specified function on each of them * @@ -861,7 +856,7 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) */ wmb(); - smp_send_nmi_allbutself(); + apic_send_IPI_allbutself(NMI_VECTOR); /* Kick CPUs looping in NMI context. */ WRITE_ONCE(crash_ipi_issued, 1); diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 96421f97e75c..b8d4e9c3c070 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -115,46 +115,6 @@ static atomic_t stopping_cpu = ATOMIC_INIT(-1); static bool smp_no_nmi_ipi = false; -/* - * this function sends a 'reschedule' IPI to another CPU. - * it goes straight through and wastes no time serializing - * anything. Worst case is that we lose a reschedule ... - */ -static void native_smp_send_reschedule(int cpu) -{ - if (unlikely(cpu_is_offline(cpu))) { - WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu); - return; - } - apic->send_IPI(cpu, RESCHEDULE_VECTOR); -} - -void native_send_call_func_single_ipi(int cpu) -{ - apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR); -} - -void native_send_call_func_ipi(const struct cpumask *mask) -{ - cpumask_var_t allbutself; - - if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { - apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); - return; - } - - cpumask_copy(allbutself, cpu_online_mask); - __cpumask_clear_cpu(smp_processor_id(), allbutself); - - if (cpumask_equal(mask, allbutself) && - cpumask_equal(cpu_online_mask, cpu_callout_mask)) - apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR); - else - apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); - - free_cpumask_var(allbutself); -} - static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) { /* We are registered on stopping cpu too, avoid spurious NMI */ @@ -179,6 +139,12 @@ asmlinkage __visible void smp_reboot_interrupt(void) irq_exit(); } +static int register_stop_handler(void) +{ + return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, + NMI_FLAG_FIRST, "smp_stop"); +} + static void native_stop_other_cpus(int wait) { unsigned long flags; @@ -209,42 +175,44 @@ static void native_stop_other_cpus(int wait) /* sync above data before sending IRQ */ wmb(); - apic->send_IPI_allbutself(REBOOT_VECTOR); + apic_send_IPI_allbutself(REBOOT_VECTOR); /* - * Don't wait longer than a second if the caller - * didn't ask us to wait. + * Don't wait longer than a second for IPI completion. The + * wait request is not checked here because that would + * prevent an NMI shutdown attempt in case that not all + * CPUs reach shutdown state. */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && (wait || timeout--)) + while (num_online_cpus() > 1 && timeout--) udelay(1); } - - /* if the REBOOT_VECTOR didn't work, try with the NMI */ - if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) { - if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, - NMI_FLAG_FIRST, "smp_stop")) - /* Note: we ignore failures here */ - /* Hope the REBOOT_IRQ is good enough */ - goto finish; - /* sync above data before sending IRQ */ - wmb(); - - pr_emerg("Shutting down cpus with NMI\n"); + /* if the REBOOT_VECTOR didn't work, try with the NMI */ + if (num_online_cpus() > 1) { + /* + * If NMI IPI is enabled, try to register the stop handler + * and send the IPI. In any case try to wait for the other + * CPUs to stop. 
+ */ + if (!smp_no_nmi_ipi && !register_stop_handler()) { + /* Sync above data before sending IRQ */ + wmb(); - apic->send_IPI_allbutself(NMI_VECTOR); + pr_emerg("Shutting down cpus with NMI\n"); + apic_send_IPI_allbutself(NMI_VECTOR); + } /* - * Don't wait longer than a 10 ms if the caller - * didn't ask us to wait. + * Don't wait longer than 10 ms if the caller didn't + * reqeust it. If wait is true, the machine hangs here if + * one or more CPUs do not reach shutdown state. */ timeout = USEC_PER_MSEC * 10; while (num_online_cpus() > 1 && (wait || timeout--)) udelay(1); } -finish: local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index fdbd47ceb84d..69881b2d446c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1023,8 +1023,6 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle) static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, int *cpu0_nmi_registered) { - volatile u32 *trampoline_status = - (volatile u32 *) __va(real_mode_header->trampoline_status); /* start_ip had better be page-aligned! */ unsigned long start_ip = real_mode_header->trampoline_start; @@ -1116,9 +1114,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, } } - /* mark "stuck" area as not stuck */ - *trampoline_status = 0; - if (x86_platform.legacy.warm_reset) { /* * Cleanup possible dangling ends... @@ -1596,7 +1591,12 @@ int native_cpu_disable(void) if (ret) return ret; - clear_local_APIC(); + /* + * Disable the local APIC. Otherwise IPI broadcasts will reach + * it. It still responds normally to INIT, NMI, SMI, and SIPI + * messages. + */ + apic_soft_disable(); cpu_disable_common(); return 0; diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 57d87f79558f..c59454c382fd 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -638,7 +638,7 @@ unsigned long native_calibrate_tsc(void) * clock. */ if (crystal_khz == 0 && - boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_X) + boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT_D) crystal_khz = 25000; /* diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 067858fe4db8..e0cbe4f2af49 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -58,6 +58,10 @@ static const struct freq_desc freq_desc_ann = { 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } }; +static const struct freq_desc freq_desc_lgm = { + 1, { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 } +}; + static const struct x86_cpu_id tsc_msr_cpu_ids[] = { INTEL_CPU_FAM6(ATOM_SALTWELL_MID, freq_desc_pnw), INTEL_CPU_FAM6(ATOM_SALTWELL_TABLET, freq_desc_clv), @@ -65,6 +69,7 @@ static const struct x86_cpu_id tsc_msr_cpu_ids[] = { INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, freq_desc_tng), INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht), INTEL_CPU_FAM6(ATOM_AIRMONT_MID, freq_desc_ann), + INTEL_CPU_FAM6(ATOM_AIRMONT_NP, freq_desc_lgm), {} }; diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index 5b345add550f..548fefed71ee 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -19,7 +19,7 @@ /** DOC: Emulation for User-Mode Instruction Prevention (UMIP) * * The feature User-Mode Instruction Prevention present in recent Intel - * processor prevents a group of instructions (sgdt, sidt, sldt, smsw, and str) + * processor prevents a group of instructions (SGDT, SIDT, SLDT, SMSW and STR) * from being executed with CPL > 0. 
Otherwise, a general protection fault is * issued. * @@ -36,8 +36,8 @@ * DOSEMU2) rely on this subset of instructions to function. * * The instructions protected by UMIP can be split in two groups. Those which - * return a kernel memory address (sgdt and sidt) and those which return a - * value (sldt, str and smsw). + * return a kernel memory address (SGDT and SIDT) and those which return a + * value (SLDT, STR and SMSW). * * For the instructions that return a kernel memory address, applications * such as WineHQ rely on the result being located in the kernel memory space, @@ -45,15 +45,13 @@ * value that, lies close to the top of the kernel memory. The limit for the GDT * and the IDT are set to zero. * - * Given that sldt and str are not commonly used in programs that run on WineHQ + * Given that SLDT and STR are not commonly used in programs that run on WineHQ * or DOSEMU2, they are not emulated. * * The instruction smsw is emulated to return the value that the register CR0 * has at boot time as set in the head_32. * - * Also, emulation is provided only for 32-bit processes; 64-bit processes - * that attempt to use the instructions that UMIP protects will receive the - * SIGSEGV signal issued as a consequence of the general protection fault. + * Emulation is provided for both 32-bit and 64-bit processes. * * Care is taken to appropriately emulate the results when segmentation is * used. That is, rather than relying on USER_DS and USER_CS, the function @@ -63,17 +61,18 @@ * application uses a local descriptor table. */ -#define UMIP_DUMMY_GDT_BASE 0xfffe0000 -#define UMIP_DUMMY_IDT_BASE 0xffff0000 +#define UMIP_DUMMY_GDT_BASE 0xfffffffffffe0000ULL +#define UMIP_DUMMY_IDT_BASE 0xffffffffffff0000ULL /* * The SGDT and SIDT instructions store the contents of the global descriptor * table and interrupt table registers, respectively. The destination is a * memory operand of X+2 bytes. X bytes are used to store the base address of - * the table and 2 bytes are used to store the limit. In 32-bit processes, the - * only processes for which emulation is provided, X has a value of 4. + * the table and 2 bytes are used to store the limit. In 32-bit processes X + * has a value of 4, in 64-bit processes X has a value of 8. */ -#define UMIP_GDT_IDT_BASE_SIZE 4 +#define UMIP_GDT_IDT_BASE_SIZE_64BIT 8 +#define UMIP_GDT_IDT_BASE_SIZE_32BIT 4 #define UMIP_GDT_IDT_LIMIT_SIZE 2 #define UMIP_INST_SGDT 0 /* 0F 01 /0 */ @@ -189,6 +188,7 @@ static int identify_insn(struct insn *insn) * @umip_inst: A constant indicating the instruction to emulate * @data: Buffer into which the dummy result is stored * @data_size: Size of the emulated result + * @x86_64: true if process is 64-bit, false otherwise * * Emulate an instruction protected by UMIP and provide a dummy result. The * result of the emulation is saved in @data. The size of the results depends @@ -202,11 +202,8 @@ static int identify_insn(struct insn *insn) * 0 on success, -EINVAL on error while emulating. */ static int emulate_umip_insn(struct insn *insn, int umip_inst, - unsigned char *data, int *data_size) + unsigned char *data, int *data_size, bool x86_64) { - unsigned long dummy_base_addr, dummy_value; - unsigned short dummy_limit = 0; - if (!data || !data_size || !insn) return -EINVAL; /* @@ -219,6 +216,9 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, * is always returned irrespective of the operand size. 
*/ if (umip_inst == UMIP_INST_SGDT || umip_inst == UMIP_INST_SIDT) { + u64 dummy_base_addr; + u16 dummy_limit = 0; + /* SGDT and SIDT do not use registers operands. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) return -EINVAL; @@ -228,13 +228,24 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, else dummy_base_addr = UMIP_DUMMY_IDT_BASE; - *data_size = UMIP_GDT_IDT_LIMIT_SIZE + UMIP_GDT_IDT_BASE_SIZE; + /* + * 64-bit processes use the entire dummy base address. + * 32-bit processes use the lower 32 bits of the base address. + * dummy_base_addr is always 64 bits, but we memcpy the correct + * number of bytes from it to the destination. + */ + if (x86_64) + *data_size = UMIP_GDT_IDT_BASE_SIZE_64BIT; + else + *data_size = UMIP_GDT_IDT_BASE_SIZE_32BIT; + + memcpy(data + 2, &dummy_base_addr, *data_size); - memcpy(data + 2, &dummy_base_addr, UMIP_GDT_IDT_BASE_SIZE); + *data_size += UMIP_GDT_IDT_LIMIT_SIZE; memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE); } else if (umip_inst == UMIP_INST_SMSW) { - dummy_value = CR0_STATE; + unsigned long dummy_value = CR0_STATE; /* * Even though the CR0 register has 4 bytes, the number @@ -290,11 +301,10 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) * fixup_umip_exception() - Fixup a general protection fault caused by UMIP * @regs: Registers as saved when entering the #GP handler * - * The instructions sgdt, sidt, str, smsw, sldt cause a general protection - * fault if executed with CPL > 0 (i.e., from user space). If the offending - * user-space process is not in long mode, this function fixes the exception - * up and provides dummy results for sgdt, sidt and smsw; str and sldt are not - * fixed up. Also long mode user-space processes are not fixed up. + * The instructions SGDT, SIDT, STR, SMSW and SLDT cause a general protection + * fault if executed with CPL > 0 (i.e., from user space). This function fixes + * the exception up and provides dummy results for SGDT, SIDT and SMSW; STR + * and SLDT are not fixed up. * * If operands are memory addresses, results are copied to user-space memory as * indicated by the instruction pointed by eIP using the registers indicated in @@ -373,13 +383,14 @@ bool fixup_umip_exception(struct pt_regs *regs) umip_pr_warning(regs, "%s instruction cannot be used by applications.\n", umip_insns[umip_inst]); - /* Do not emulate SLDT, STR or user long mode processes. */ - if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs)) + /* Do not emulate (spoof) SLDT or STR. */ + if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT) return false; umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n"); - if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size)) + if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size, + user_64bit_mode(regs))) return false; /* diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index d8359ebeea70..8cd745ef8c7b 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -508,9 +508,12 @@ struct uprobe_xol_ops { void (*abort)(struct arch_uprobe *, struct pt_regs *); }; -static inline int sizeof_long(void) +static inline int sizeof_long(struct pt_regs *regs) { - return in_ia32_syscall() ? 4 : 8; + /* + * Check registers for mode as in_xxx_syscall() does not apply here. + */ + return user_64bit_mode(regs) ? 
8 : 4; } static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) @@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) static int emulate_push_stack(struct pt_regs *regs, unsigned long val) { - unsigned long new_sp = regs->sp - sizeof_long(); + unsigned long new_sp = regs->sp - sizeof_long(regs); - if (copy_to_user((void __user *)new_sp, &val, sizeof_long())) + if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs))) return -EFAULT; regs->sp = new_sp; @@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs long correction = utask->vaddr - utask->xol_vaddr; regs->ip += correction; } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { - regs->sp += sizeof_long(); /* Pop incorrect return address */ + regs->sp += sizeof_long(regs); /* Pop incorrect return address */ if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen)) return -ERESTART; } @@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) * "call" insn was executed out-of-line. Just restore ->sp and restart. * We could also restore ->ip and try to call branch_emulate_op() again. */ - regs->sp += sizeof_long(); + regs->sp += sizeof_long(regs); return -ERESTART; } @@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) { - int rasize = sizeof_long(), nleft; + int rasize = sizeof_long(regs), nleft; unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize)) |
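
Editor's note on the vmware.c hunks above: the rework selects the hypercall mechanism (legacy port I/O, VMCALL on Intel VT, or VMMCALL on AMD SVM) from ECX of CPUID leaf 0x40000010. Below is a minimal user-space sketch of the same probe, assuming it runs in a guest that exposes the VMware CPUID leaves; the leaf numbers and bit layout come from the patch, while main() and the printing are illustrative only and no hypercall is actually issued.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CPUID_VMWARE_INFO_LEAF      0x40000000
#define CPUID_VMWARE_FEATURES_LEAF  0x40000010
#define FEATURES_ECX_VMMCALL        (1u << 0)
#define FEATURES_ECX_VMCALL         (1u << 1)

static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        __asm__ volatile("cpuid"
                         : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                         : "a"(leaf), "c"(0));
}

int main(void)
{
        uint32_t eax, ebx, ecx, edx;
        char sig[13] = { 0 };

        /* Signature is returned in EBX, ECX, EDX, exactly as vmware_platform() reads it. */
        cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx);
        memcpy(sig, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);

        if (strcmp(sig, "VMwareVMware") != 0 || eax < CPUID_VMWARE_FEATURES_LEAF) {
                /* Mode stays 0: legacy IN to the backdoor port. */
                puts("hypercall mode: port I/O");
                return 0;
        }

        cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx);
        if (ecx & FEATURES_ECX_VMCALL)
                puts("hypercall mode: VMCALL (Intel VT)");
        else if (ecx & FEATURES_ECX_VMMCALL)
                puts("hypercall mode: VMMCALL (AMD SVM)");
        else
                puts("hypercall mode: port I/O");
        return 0;
}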
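
On the smp.c rework of native_stop_other_cpus(): the REBOOT-vector wait is now always bounded to one second regardless of the caller's wait flag, and only afterwards does the code fall back to an NMI broadcast; wait is honoured only in the final loop. A stripped-down sketch of that ordering follows, with the IPI and NMI-registration primitives stubbed out as placeholders; only the names taken from the patch are real.

#include <stdbool.h>
#include <stdio.h>

/* Placeholders standing in for the real kernel primitives. */
static int online_cpus = 4;
static bool nmi_ipi_disabled;

static void send_reboot_ipi(void)       { puts("REBOOT_VECTOR to all but self"); online_cpus = 2; }
static void send_nmi_ipi(void)          { puts("NMI_VECTOR to all but self");    online_cpus = 1; }
static int  register_stop_handler(void) { return 0; /* 0 == registered */ }
static void udelay(int usec)            { (void)usec; }

static void stop_other_cpus(bool wait)
{
        long timeout;

        send_reboot_ipi();

        /*
         * Wait up to one second for the REBOOT IPI even if the caller did
         * not ask to wait; otherwise a stuck CPU would never get the NMI
         * fallback below.
         */
        timeout = 1000000;
        while (online_cpus > 1 && timeout--)
                udelay(1);

        if (online_cpus > 1) {
                if (!nmi_ipi_disabled && !register_stop_handler())
                        send_nmi_ipi();

                /* Only this final loop may wait forever, and only if @wait is set. */
                timeout = 10 * 1000;
                while (online_cpus > 1 && (wait || timeout--))
                        udelay(1);
        }
}

int main(void) { stop_other_cpus(false); return 0; }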
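
On the umip.c hunks: SGDT/SIDT spoofing is extended to 64-bit tasks, so the emulated result is still a 2-byte limit of zero followed by the dummy base, but the base is copied as 8 bytes for 64-bit processes and 4 bytes for 32-bit ones (the low half of the 64-bit dummy address). A stand-alone sketch of that buffer layout, using the constants from the patch; the helper name build_dummy_sgdt() is made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define UMIP_DUMMY_GDT_BASE      0xfffffffffffe0000ULL
#define UMIP_GDT_IDT_LIMIT_SIZE  2

/* Build the spoofed SGDT result: 2-byte limit (0) followed by the dummy base. */
static int build_dummy_sgdt(unsigned char *data, bool x86_64)
{
        uint64_t base = UMIP_DUMMY_GDT_BASE;
        uint16_t limit = 0;
        int base_size = x86_64 ? 8 : 4;  /* UMIP_GDT_IDT_BASE_SIZE_{64,32}BIT */

        memcpy(data, &limit, UMIP_GDT_IDT_LIMIT_SIZE);
        /* On little-endian x86 a 4-byte copy yields the low 32 bits of the base. */
        memcpy(data + UMIP_GDT_IDT_LIMIT_SIZE, &base, base_size);

        return UMIP_GDT_IDT_LIMIT_SIZE + base_size;  /* bytes written */
}

int main(void)
{
        unsigned char buf[10];
        int n = build_dummy_sgdt(buf, true);

        for (int i = 0; i < n; i++)
                printf("%02x ", buf[i]);
        putchar('\n');
        return 0;
}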
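
Finally, on the uprobes.c change: sizeof_long() now derives the width of the probed task's "long" from the saved registers (user_64bit_mode()) rather than from in_ia32_syscall(), since uprobes run in task context where the syscall helpers do not apply; every stack adjustment and return-address copy must use that width or a 32-bit task's stack gets corrupted. A tiny sketch of the idea, with a plain flag standing in for pt_regs and a byte array standing in for the user stack; everything except sizeof_long() itself is a placeholder.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Width of a "long" as seen by the probed task, not by the kernel. */
static int sizeof_long(bool task_is_64bit)
{
        return task_is_64bit ? 8 : 4;
}

/* Emulate pushing a return address for a probed call instruction. */
static uint64_t emulate_push(unsigned char *stack, uint64_t sp, uint64_t ret, bool is64)
{
        int width = sizeof_long(is64);
        uint64_t new_sp = sp - width;

        memcpy(stack + new_sp, &ret, width);  /* low 4 bytes only for a 32-bit task */
        return new_sp;
}

int main(void)
{
        unsigned char stack[64] = { 0 };
        uint64_t sp = sizeof(stack);

        sp = emulate_push(stack, sp, 0x00401234ULL, false);      /* 32-bit task: 4 bytes */
        sp = emulate_push(stack, sp, 0x7f0000401234ULL, true);   /* 64-bit task: 8 bytes */
        printf("final sp = %llu\n", (unsigned long long)sp);
        return 0;
}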