author     Paolo Bonzini <pbonzini@redhat.com>  2014-07-22 10:22:53 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2014-07-22 10:22:53 +0200
commit     c756ad036fc8ef432438e89aab63cc723eda39b4
tree       16bf4d2d280f624ff2ce99f5d02aeb7f002863f6
parent     KVM: x86: DR6/7.RTM cannot be written
parent     KVM: s390: add ipte to trace event decoding
Merge tag 'kvm-s390-20140721' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
Bugfixes
--------
- add IPTE to trace event decoder
- document and advertise KVM_CAP_S390_IRQCHIP

Cleanups
--------
- Reuse kvm_vcpu_block for s390
- Get rid of tasklet for wakeup processing
 Documentation/virtual/kvm/api.txt |  27
 arch/s390/include/asm/kvm_host.h  |   2
 arch/s390/include/uapi/asm/sie.h  |   1
 arch/s390/kvm/interrupt.c         | 100
 arch/s390/kvm/kvm-s390.c          |  18
 arch/s390/kvm/kvm-s390.h          |   2
 arch/s390/kvm/sigp.c              |  36
 7 files changed, 82 insertions(+), 104 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 587972ca12c5..68cda1fc3d52 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2934,15 +2934,18 @@ The fields in each entry are defined as follows:
6. Capabilities that can be enabled
-----------------------------------
-There are certain capabilities that change the behavior of the virtual CPU when
-enabled. To enable them, please see section 4.37. Below you can find a list of
-capabilities and what their effect on the vCPU is when enabling them.
+There are certain capabilities that change the behavior of the virtual CPU or
+the virtual machine when enabled. To enable them, please see section 4.37.
+Below you can find a list of capabilities and what their effect on the vCPU or
+the virtual machine is when enabling them.
The following information is provided along with the description:
Architectures: which instruction set architectures provide this ioctl.
x86 includes both i386 and x86_64.
+ Target: whether this is a per-vcpu or per-vm capability.
+
Parameters: what parameters are accepted by the capability.
Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
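
(Illustration, not part of the patch: the KVM_ENABLE_CAP flow that section 4.37
describes, sketched for a vcpu-targeted capability. The helper and fd names are
hypothetical; only KVM_ENABLE_CAP and struct kvm_enable_cap come from the API.)

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Hypothetical helper: a per-vcpu capability (e.g. KVM_CAP_PPC_OSI)
     * is enabled by issuing KVM_ENABLE_CAP on the vcpu fd. */
    static int enable_vcpu_cap(int vcpu_fd, __u32 capability)
    {
            struct kvm_enable_cap cap = { .cap = capability };

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }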
@@ -2952,6 +2955,7 @@ The following information is provided along with the description:
6.1 KVM_CAP_PPC_OSI
Architectures: ppc
+Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error
@@ -2966,6 +2970,7 @@ When this capability is enabled, KVM_EXIT_OSI can occur.
6.2 KVM_CAP_PPC_PAPR
Architectures: ppc
+Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error
@@ -2985,6 +2990,7 @@ When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
6.3 KVM_CAP_SW_TLB
Architectures: ppc
+Target: vcpu
Parameters: args[0] is the address of a struct kvm_config_tlb
Returns: 0 on success; -1 on error
@@ -3027,6 +3033,7 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
6.4 KVM_CAP_S390_CSS_SUPPORT
Architectures: s390
+Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error
@@ -3038,9 +3045,13 @@ handled in-kernel, while the other I/O instructions are passed to userspace.
When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
SUBCHANNEL intercepts.
+Note that even though this capability is enabled per-vcpu, the complete
+virtual machine is affected.
+
6.5 KVM_CAP_PPC_EPR
Architectures: ppc
+Target: vcpu
Parameters: args[0] defines whether the proxy facility is active
Returns: 0 on success; -1 on error
@@ -3066,7 +3077,17 @@ This capability connects the vcpu to an in-kernel MPIC device.
6.7 KVM_CAP_IRQ_XICS
Architectures: ppc
+Target: vcpu
Parameters: args[0] is the XICS device fd
args[1] is the XICS CPU number (server ID) for this vcpu
This capability connects the vcpu to an in-kernel XICS device.
+
+6.8 KVM_CAP_S390_IRQCHIP
+
+Architectures: s390
+Target: vm
+Parameters: none
+
+This capability enables the in-kernel irqchip for s390. Please refer to
+"4.24 KVM_CREATE_IRQCHIP" for details.
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index c2ba0208a0e1..773bef7614d8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt {
struct list_head list;
atomic_t active;
struct kvm_s390_float_interrupt *float_int;
- int timer_due; /* event indicator for waitqueue below */
wait_queue_head_t *wq;
atomic_t *cpuflags;
unsigned int action_bits;
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch {
s390_fp_regs guest_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
- struct tasklet_struct tasklet;
struct kvm_s390_pgm_info pgm;
union {
struct cpuid cpu_id;
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index 5d9cc19462c4..d4096fdfc6ab 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -108,6 +108,7 @@
exit_code_ipa0(0xB2, 0x17, "STETR"), \
exit_code_ipa0(0xB2, 0x18, "PC"), \
exit_code_ipa0(0xB2, 0x20, "SERVC"), \
+ exit_code_ipa0(0xB2, 0x21, "IPTE"), \
exit_code_ipa0(0xB2, 0x28, "PT"), \
exit_code_ipa0(0xB2, 0x29, "ISKE"), \
exit_code_ipa0(0xB2, 0x2a, "RRBE"), \
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 90c8de22a2a0..1be3d8da49e9 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -544,13 +544,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
int rc = 0;
if (atomic_read(&li->active)) {
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
list_for_each_entry(inti, &li->list, list)
if (__interrupt_is_deliverable(vcpu, inti)) {
rc = 1;
break;
}
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
}
if ((!rc) && atomic_read(&fi->active)) {
@@ -585,88 +585,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
u64 now, sltime;
- DECLARE_WAITQUEUE(wait, current);
vcpu->stat.exit_wait_state++;
- if (kvm_cpu_has_interrupt(vcpu))
- return 0;
- __set_cpu_idle(vcpu);
- spin_lock_bh(&vcpu->arch.local_int.lock);
- vcpu->arch.local_int.timer_due = 0;
- spin_unlock_bh(&vcpu->arch.local_int.lock);
+ /* fast path */
+ if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+ return 0;
if (psw_interrupts_disabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
- __unset_cpu_idle(vcpu);
return -EOPNOTSUPP; /* disabled wait */
}
+ __set_cpu_idle(vcpu);
if (!ckc_interrupts_enabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
goto no_timer;
}
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
- if (vcpu->arch.sie_block->ckc < now) {
- __unset_cpu_idle(vcpu);
- return 0;
- }
-
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
- spin_lock(&vcpu->arch.local_int.float_int->lock);
- spin_lock_bh(&vcpu->arch.local_int.lock);
- add_wait_queue(&vcpu->wq, &wait);
- while (list_empty(&vcpu->arch.local_int.list) &&
- list_empty(&vcpu->arch.local_int.float_int->list) &&
- (!vcpu->arch.local_int.timer_due) &&
- !signal_pending(current) &&
- !kvm_s390_si_ext_call_pending(vcpu)) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_bh(&vcpu->arch.local_int.lock);
- spin_unlock(&vcpu->arch.local_int.float_int->lock);
- schedule();
- spin_lock(&vcpu->arch.local_int.float_int->lock);
- spin_lock_bh(&vcpu->arch.local_int.lock);
- }
+ kvm_vcpu_block(vcpu);
__unset_cpu_idle(vcpu);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&vcpu->wq, &wait);
- spin_unlock_bh(&vcpu->arch.local_int.lock);
- spin_unlock(&vcpu->arch.local_int.float_int->lock);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
return 0;
}
-void kvm_s390_tasklet(unsigned long parm)
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
-
- spin_lock(&vcpu->arch.local_int.lock);
- vcpu->arch.local_int.timer_due = 1;
- if (waitqueue_active(&vcpu->wq))
+ if (waitqueue_active(&vcpu->wq)) {
+ /*
+ * The vcpu gave up the cpu voluntarily, mark it as a good
+ * yield-candidate.
+ */
+ vcpu->preempted = true;
wake_up_interruptible(&vcpu->wq);
- spin_unlock(&vcpu->arch.local_int.lock);
+ }
}
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
- vcpu->preempted = true;
- tasklet_schedule(&vcpu->arch.tasklet);
+ kvm_s390_vcpu_wakeup(vcpu);
return HRTIMER_NORESTART;
}
@@ -676,13 +644,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_interrupt_info *n, *inti = NULL;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
list_for_each_entry_safe(inti, n, &li->list, list) {
list_del(&inti->list);
kfree(inti);
}
atomic_set(&li->active, 0);
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
/* clear pending external calls set by sigp interpretation facility */
atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -701,7 +669,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
if (atomic_read(&li->active)) {
do {
deliver = 0;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
list_for_each_entry_safe(inti, n, &li->list, list) {
if (__interrupt_is_deliverable(vcpu, inti)) {
list_del(&inti->list);
@@ -712,7 +680,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
}
if (list_empty(&li->list))
atomic_set(&li->active, 0);
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
if (deliver) {
__do_deliver_interrupt(vcpu, inti);
kfree(inti);
@@ -758,7 +726,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
if (atomic_read(&li->active)) {
do {
deliver = 0;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
list_for_each_entry_safe(inti, n, &li->list, list) {
if ((inti->type == KVM_S390_MCHK) &&
__interrupt_is_deliverable(vcpu, inti)) {
@@ -770,7 +738,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
}
if (list_empty(&li->list))
atomic_set(&li->active, 0);
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
if (deliver) {
__do_deliver_interrupt(vcpu, inti);
kfree(inti);
@@ -817,11 +785,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
list_add(&inti->list, &li->list);
atomic_set(&li->active, 1);
BUG_ON(waitqueue_active(li->wq));
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return 0;
}
@@ -842,11 +810,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
inti->type = KVM_S390_PROGRAM_INT;
memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
list_add(&inti->list, &li->list);
atomic_set(&li->active, 1);
BUG_ON(waitqueue_active(li->wq));
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return 0;
}
@@ -934,12 +902,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
}
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
- if (waitqueue_active(li->wq))
- wake_up_interruptible(li->wq);
- kvm_get_vcpu(kvm, sigcpu)->preempted = true;
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
+ kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
spin_unlock(&fi->lock);
mutex_unlock(&kvm->lock);
@@ -1081,7 +1047,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
mutex_lock(&vcpu->kvm->lock);
li = &vcpu->arch.local_int;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
if (inti->type == KVM_S390_PROGRAM_INT)
list_add(&inti->list, &li->list);
else
@@ -1090,11 +1056,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
if (inti->type == KVM_S390_SIGP_STOP)
li->action_bits |= ACTION_STOP_ON_STOP;
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
- vcpu->preempted = true;
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
mutex_unlock(&vcpu->kvm->lock);
+ kvm_s390_vcpu_wakeup(vcpu);
return 0;
}
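
(Illustration, not part of the patch: the reasoning behind dropping the
tasklet. A waitqueue wakeup is safe from hardirq context, so the hrtimer
callback above can call kvm_s390_vcpu_wakeup() directly instead of bouncing
through a tasklet. A generic sketch of that pattern, with hypothetical names:)

    #include <linux/hrtimer.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

    /* Runs in hardirq context; wake_up_interruptible() is legal here,
     * so no tasklet is needed to defer the real work. */
    static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
    {
            wake_up_interruptible(&demo_wq);
            return HRTIMER_NORESTART;
    }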
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fdf88f7a539c..339b34a02fb8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -166,6 +166,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_ENABLE_CAP_VM:
+ case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
r = 1;
@@ -649,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return rc;
}
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
- (unsigned long) vcpu);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
get_cpu_id(&vcpu->arch.cpu_id);
vcpu->arch.cpu_id.version = 0xff;
@@ -1068,6 +1067,9 @@ retry:
goto retry;
}
+ /* nothing to do, just clear the request */
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+
return 0;
}
@@ -1475,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
/* Only one cpu at a time may enter/leave the STOPPED state. */
- spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
for (i = 0; i < online_vcpus; i++) {
@@ -1501,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
* Let's play safe and flush the VCPU at startup.
*/
vcpu->arch.sie_block->ihcpu = 0xffff;
- spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return;
}
@@ -1515,17 +1517,17 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
/* Only one cpu at a time may enter/leave the STOPPED state. */
- spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_lock(&vcpu->kvm->arch.start_stop_lock);
online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
/* Need to lock access to action_bits to avoid a SIGP race condition */
- spin_lock_bh(&vcpu->arch.local_int.lock);
+ spin_lock(&vcpu->arch.local_int.lock);
atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
vcpu->arch.local_int.action_bits &=
~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
- spin_unlock_bh(&vcpu->arch.local_int.lock);
+ spin_unlock(&vcpu->arch.local_int.lock);
__disable_ibs_on_vcpu(vcpu);
@@ -1544,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
__enable_ibs_on_vcpu(started_vcpu);
}
- spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+ spin_unlock(&vcpu->kvm->arch.start_stop_lock);
return;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 33a0e4bed2a5..3862fa2cefe0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -136,8 +136,8 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
-void kvm_s390_tasklet(unsigned long parm);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index fd7fb5c5ef5d..c6f1c2bc9753 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
+ struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
struct kvm_s390_interrupt_info *inti;
int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
@@ -135,7 +136,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
return -ENOMEM;
inti->type = KVM_S390_SIGP_STOP;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP) {
/* another SIGP STOP is pending */
rc = SIGP_CC_BUSY;
@@ -151,17 +152,15 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
atomic_set(&li->active, 1);
li->action_bits |= action;
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
- if (waitqueue_active(li->wq))
- wake_up_interruptible(li->wq);
+ kvm_s390_vcpu_wakeup(dst_vcpu);
out:
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return rc;
}
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
- struct kvm_s390_local_interrupt *li;
struct kvm_vcpu *dst_vcpu = NULL;
int rc;
@@ -171,9 +170,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
- li = &dst_vcpu->arch.local_int;
- rc = __inject_sigp_stop(li, action);
+ rc = __inject_sigp_stop(dst_vcpu, action);
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
@@ -243,7 +241,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
if (!inti)
return SIGP_CC_BUSY;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
/* cpu must be in stopped state */
if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
@@ -258,13 +256,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
- if (waitqueue_active(li->wq))
- wake_up_interruptible(li->wq);
+ kvm_s390_vcpu_wakeup(dst_vcpu);
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return rc;
}
@@ -280,9 +277,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
- spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+ spin_lock(&dst_vcpu->arch.local_int.lock);
flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
- spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+ spin_unlock(&dst_vcpu->arch.local_int.lock);
if (!(flags & CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -343,10 +340,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
li = &dst_vcpu->arch.local_int;
- spin_lock_bh(&li->lock);
+ spin_lock(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP)
rc = SIGP_CC_BUSY;
- spin_unlock_bh(&li->lock);
+ spin_unlock(&li->lock);
return rc;
}
@@ -466,12 +463,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
BUG_ON(dest_vcpu == NULL);
- spin_lock_bh(&dest_vcpu->arch.local_int.lock);
- if (waitqueue_active(&dest_vcpu->wq))
- wake_up_interruptible(&dest_vcpu->wq);
- dest_vcpu->preempted = true;
- spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
-
+ kvm_s390_vcpu_wakeup(dest_vcpu);
kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
return 0;
}