author     Yi Sun <yi.y.sun@linux.intel.com>     2018-10-08 10:29:34 +0200
committer  Thomas Gleixner <tglx@linutronix.de>  2018-10-09 14:21:39 +0200
commit     3a025de64bf89c84a79909069e3c24ad9e710d27
tree       04adc9cf615d5644da69374dbf9d70b4a33cb63d /arch/x86/hyperv
parent     x86/hyperv: Add GUEST_IDLE_MSR support
x86/hyperv: Enable PV qspinlock for Hyper-V
Implement the required wait and kick callbacks to support PV spinlocks in
Hyper-V guests.

[ tglx: Document the requirement for disabling interrupts in the wait()
  callback. Remove goto and unnecessary includes. Add prototype for
  hv_vcpu_is_preempted(). Adapted to pending paravirt changes. ]

Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Michael Kelley (EOSG) <Michael.H.Kelley@microsoft.com>
Cc: chao.p.peng@intel.com
Cc: chao.gao@intel.com
Cc: isaku.yamahata@intel.com
Cc: tianyu.lan@microsoft.com
Link: https://lkml.kernel.org/r/1538987374-51217-3-git-send-email-yi.y.sun@linux.intel.com
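For context on what the two callbacks plug into: the PV qspinlock slowpath
parks a contended waiter through pv_ops.lock.wait() and the unlock path wakes
it through pv_ops.lock.kick(). The userspace analogue below is a minimal
sketch of that contract, not kernel code: sketch_wait() and sketch_kick() are
hypothetical names, and the pthread mutex/condvar pair stands in for both the
interrupt disabling and the GUEST_IDLE MSR read / wakeup IPI.

/*
 * pv_wait_sketch.c - userspace analogue of the wait()/kick() contract
 * (illustrative only; all names here are hypothetical, not kernel APIs).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned char lock_byte = 1;   /* 1 == "locked" */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;

/* Analogue of hv_qlock_wait(): sleep only while *byte still holds val. */
static void sketch_wait(_Atomic unsigned char *byte, unsigned char val)
{
	pthread_mutex_lock(&m);              /* ~ local_irq_save()       */
	while (atomic_load(byte) == val)     /* ~ READ_ONCE(*byte) check */
		pthread_cond_wait(&c, &m);   /* ~ GUEST_IDLE MSR read    */
	pthread_mutex_unlock(&m);            /* ~ local_irq_restore()    */
}

/* Analogue of hv_qlock_kick(): release the lock, then wake the waiter. */
static void *sketch_kick(void *arg)
{
	pthread_mutex_lock(&m);
	atomic_store(&lock_byte, 0);         /* unlock                   */
	pthread_cond_broadcast(&c);          /* ~ send wakeup IPI        */
	pthread_mutex_unlock(&m);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sketch_kick, NULL);
	sketch_wait(&lock_byte, 1);          /* parks until kicked       */
	pthread_join(t, NULL);
	puts("woken after kick");
	return 0;
}

Built with cc -pthread, the main thread blocks in sketch_wait() until the
second thread clears the byte and signals, mirroring how a kicked vCPU leaves
the hypervisor's idle state. Note that hv_qlock_wait() itself sleeps at most
once per call; the while loop above is only the usual guard against spurious
condvar wakeups.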
Diffstat (limited to 'arch/x86/hyperv')
-rw-r--r--  arch/x86/hyperv/Makefile       |  4
-rw-r--r--  arch/x86/hyperv/hv_spinlock.c  | 88
2 files changed, 92 insertions, 0 deletions
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index b21ee65c4101..1c11f9420a82 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,2 +1,6 @@
obj-y := hv_init.o mmu.o nested.o
obj-$(CONFIG_X86_64) += hv_apic.o
+
+ifdef CONFIG_X86_64
+obj-$(CONFIG_PARAVIRT_SPINLOCKS) += hv_spinlock.o
+endif
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
new file mode 100644
index 000000000000..a861b0456b1a
--- /dev/null
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Hyper-V specific spinlock code.
+ *
+ * Copyright (C) 2018, Intel, Inc.
+ *
+ * Author : Yi Sun <yi.y.sun@intel.com>
+ */
+
+#define pr_fmt(fmt) "Hyper-V: " fmt
+
+#include <linux/spinlock.h>
+
+#include <asm/mshyperv.h>
+#include <asm/paravirt.h>
+#include <asm/apic.h>
+
+static bool __initdata hv_pvspin = true;
+
+static void hv_qlock_kick(int cpu)
+{
+ apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
+}
+
+static void hv_qlock_wait(u8 *byte, u8 val)
+{
+ unsigned long msr_val;
+ unsigned long flags;
+
+ if (in_nmi())
+ return;
+
+ /*
+ * Reading HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that the
+ * vCPU can be put into 'idle' state. This 'idle' state is
+ * terminated by an IPI, usually from hv_qlock_kick(), even if
+ * interrupts are disabled on the vCPU.
+ *
+ * To prevent a race against the unlock path it is required to
+ * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
+ * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
+ * the lock value check and the rdmsrl() then the vCPU might be put
+ * into 'idle' state by the hypervisor and kept in that state for
+ * an unspecified amount of time.
+ */
+ local_irq_save(flags);
+ /*
+ * Only issue the rdmsrl() when the lock state has not changed.
+ */
+ if (READ_ONCE(*byte) == val)
+ rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
+ local_irq_restore(flags);
+}
+
+/*
+ * Hyper-V does not support this so far.
+ */
+bool hv_vcpu_is_preempted(int vcpu)
+{
+ return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
+
+void __init hv_init_spinlocks(void)
+{
+ if (!hv_pvspin || !apic ||
+ !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
+ !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
+ pr_info("PV spinlocks disabled\n");
+ return;
+ }
+ pr_info("PV spinlocks enabled\n");
+
+ __pv_init_lock_hash();
+ pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+ pv_ops.lock.wait = hv_qlock_wait;
+ pv_ops.lock.kick = hv_qlock_kick;
+ pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
+}
+
+static __init int hv_parse_nopvspin(char *arg)
+{
+ hv_pvspin = false;
+ return 0;
+}
+early_param("hv_nopvspin", hv_parse_nopvspin);
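
A note on the race that the local_irq_save()/local_irq_restore() pair in
hv_qlock_wait() closes: the comment in the patch describes a lost-wakeup
window, which a hypothetical interleaving with interrupts left enabled makes
concrete:

  waiter vCPU                            unlocker vCPU
  -----------                            -------------
  READ_ONCE(*byte) == val  -> true
                                         releases the lock byte
                                         hv_qlock_kick() sends the IPI,
                                         which is delivered and handled
                                         immediately
  rdmsrl(HV_X64_MSR_GUEST_IDLE)
    -> vCPU enters 'idle' with the
       only wakeup already consumed

With interrupts disabled across the check and the MSR read, the IPI instead
stays pending and terminates the 'idle' state as soon as the hypervisor
enters it, which is exactly the behaviour the comment documents.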
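Finally, the early_param() handler gives administrators an opt-out: booting
with hv_nopvspin on the kernel command line clears hv_pvspin, so
hv_init_spinlocks() returns early and logs "Hyper-V: PV spinlocks disabled"
(the pr_fmt prefix supplies the "Hyper-V: " part). The same fallback to
native qspinlocks is taken when the hypervisor does not advertise
HV_X64_CLUSTER_IPI_RECOMMENDED or HV_X64_MSR_GUEST_IDLE_AVAILABLE.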