| | | |
|---|---|---|
| author | Konstantin Weitz <WEITZKON@de.ibm.com> | 2012-04-25 15:30:38 +0200 |
| committer | Marcelo Tosatti <mtosatti@redhat.com> | 2012-05-01 02:38:31 +0200 |
| commit | 41628d334361670d825fb03c04568f5ef9f084dc (patch) | |
| tree | 88b2eadd0f7de12f1d8b226e4491532b8783ee94 /virt | |
| parent | KVM: x86: Run PIT work in own kthread (diff) | |
KVM: s390: Implement the directed yield (diag 9c) hypervisor call for KVM
This patch implements the directed yield hypercall found on other
System z hypervisors. It delegates execution time to the virtual cpu
specified in the instruction's parameter.
This is useful to avoid long spinlock waits in the guest.
[Christian Borntraeger: moved the common code into virt/kvm/]
Signed-off-by: Konstantin Weitz <WEITZKON@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
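For context, the mechanism described above works in two halves: the guest issues diagnose 0x9c with the CPU address of the vCPU it is waiting on, and the host-side handler resolves that address to a struct kvm_vcpu and donates the caller's remaining time slice through the new kvm_vcpu_yield_to() helper. The arch/s390 half is outside the virt/-limited diff shown below, so the following is only a minimal sketch under assumptions: the handler name handle_diag_9c(), its parameters, and the use of vcpu_id as the CPU address are illustrative; only kvm_vcpu_yield_to() comes from this patch.

```c
#include <linux/kvm_host.h>

/*
 * Illustrative sketch only: resolve the CPU address passed by the guest's
 * diag 9c and delegate the caller's execution time to that vCPU's task.
 */
static int handle_diag_9c(struct kvm_vcpu *vcpu, u16 target_cpu_addr)
{
        struct kvm_vcpu *tcpu;
        int i;

        /* Yielding to ourselves would accomplish nothing. */
        if (target_cpu_addr == vcpu->vcpu_id)
                return 0;

        /* Find the vCPU named by the instruction's parameter ... */
        kvm_for_each_vcpu(i, tcpu, vcpu->kvm)
                if (tcpu->vcpu_id == target_cpu_addr) {
                        /* ... and hand our time slice over to it. */
                        kvm_vcpu_yield_to(tcpu);
                        break;
                }

        return 0;
}
```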
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/kvm_main.c | 42 |
1 file changed, 26 insertions(+), 16 deletions(-)
```diff
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1847c762d8d9..7e140683ff14 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1543,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+        struct pid *pid;
+        struct task_struct *task = NULL;
+
+        rcu_read_lock();
+        pid = rcu_dereference(target->pid);
+        if (pid)
+                task = get_pid_task(target->pid, PIDTYPE_PID);
+        rcu_read_unlock();
+        if (!task)
+                return false;
+        if (task->flags & PF_VCPU) {
+                put_task_struct(task);
+                return false;
+        }
+        if (yield_to(task, 1)) {
+                put_task_struct(task);
+                return true;
+        }
+        put_task_struct(task);
+        return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
         struct kvm *kvm = me->kvm;
@@ -1561,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
          */
         for (pass = 0; pass < 2 && !yielded; pass++) {
                 kvm_for_each_vcpu(i, vcpu, kvm) {
-                        struct task_struct *task = NULL;
-                        struct pid *pid;
                         if (!pass && i < last_boosted_vcpu) {
                                 i = last_boosted_vcpu;
                                 continue;
@@ -1572,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
                                 continue;
                         if (waitqueue_active(&vcpu->wq))
                                 continue;
-                        rcu_read_lock();
-                        pid = rcu_dereference(vcpu->pid);
-                        if (pid)
-                                task = get_pid_task(vcpu->pid, PIDTYPE_PID);
-                        rcu_read_unlock();
-                        if (!task)
-                                continue;
-                        if (task->flags & PF_VCPU) {
-                                put_task_struct(task);
-                                continue;
-                        }
-                        if (yield_to(task, 1)) {
-                                put_task_struct(task);
+                        if (kvm_vcpu_yield_to(vcpu)) {
                                 kvm->last_boosted_vcpu = i;
                                 yielded = 1;
                                 break;
                         }
-                        put_task_struct(task);
                 }
         }
 }
```
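For readability, here is the new helper once more with comments spelling out the RCU, reference-counting, and PF_VCPU decisions; the code is identical to the hunk above, only the comments are added.

```c
bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
        struct pid *pid;
        struct task_struct *task = NULL;

        /*
         * target->pid is published by the vCPU thread; read it under RCU
         * and take a task reference so the task cannot disappear while we
         * yield to it.
         */
        rcu_read_lock();
        pid = rcu_dereference(target->pid);
        if (pid)
                task = get_pid_task(target->pid, PIDTYPE_PID);
        rcu_read_unlock();
        if (!task)
                return false;

        /*
         * PF_VCPU means the target task is currently executing guest code,
         * so boosting it would gain nothing.
         */
        if (task->flags & PF_VCPU) {
                put_task_struct(task);
                return false;
        }

        /* Donate the caller's time slice to the target's task. */
        if (yield_to(task, 1)) {
                put_task_struct(task);
                return true;
        }
        put_task_struct(task);
        return false;
}
```

kvm_vcpu_on_spin() now goes through this helper in its boosting loop, and the s390 diag 9c handler can call it for the specific vCPU the guest asked for.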