path: root/arch/s390/kvm/kvm-s390.c
author     David Hildenbrand <dahi@linux.vnet.ibm.com>  2015-09-29 16:27:24 +0200
committer  Christian Borntraeger <borntraeger@de.ibm.com>  2015-10-13 15:50:34 +0200
commit     5a3d883a59b3fe8dc8775c7a79200a5b11a6761e (patch)
tree       a1b60ce1821f86e3a6fc30e32ba3228d5525f6f6 /arch/s390/kvm/kvm-s390.c
parent     KVM: s390: correctly handle injection of pgm irqs and per events (diff)
KVM: s390: switch to get_tod_clock() and fix STP sync races
Nobody except early.c makes use of store_tod_clock() to handle the cc; if we ever got a cc != 0 there, we would be in deeper trouble anyway. Let's replace all users with get_tod_clock(). Returning a cc from an ioctl would be strange either way.

We can now also easily move the get_tod_clock() call into the preempt_disable() section. This is in fact necessary to make the STP sync work as expected: otherwise the host TOD could change in between and we would end up with a wrong epoch calculation.

Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
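A minimal userspace sketch of the race the commit message describes, assuming nothing beyond the reasoning above: toy_epoch.c, host_clock and get_host_clock() are hypothetical stand-ins for the host TOD clock, an STP sync step, and get_tod_clock(); this is not kernel code, just an illustration of why the host clock must be sampled at the point where the epoch is computed.

	/*
	 * toy_epoch.c - illustrative sketch only, not kernel code.
	 *
	 * The guest epoch is the offset between the guest TOD requested by
	 * userspace and the host TOD. If the host TOD is sampled before the
	 * preempt-disabled section, a host clock change in between (simulated
	 * here by stepping host_clock, like an STP sync) skews the epoch.
	 */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t host_clock;             /* stand-in for the host TOD clock */

	static uint64_t get_host_clock(void)    /* stand-in for get_tod_clock() */
	{
		return host_clock;
	}

	int main(void)
	{
		uint64_t gtod = 1000;           /* guest TOD requested by userspace */

		/* Racy pattern: sample the host clock early ... */
		uint64_t sampled = get_host_clock();
		host_clock += 50;               /* ... host clock steps in between ... */
		uint64_t stale_epoch = gtod - sampled;  /* computed from a stale sample */

		/* Fixed pattern: sample the host clock where the epoch is computed. */
		uint64_t epoch = gtod - get_host_clock();

		printf("stale epoch: %" PRIu64 ", correct epoch: %" PRIu64 "\n",
		       stale_epoch, epoch);
		return 0;
	}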
Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 18 ++++--------------
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0a67c40eece9..a0907795f31d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -523,19 +523,14 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	struct kvm_vcpu *cur_vcpu;
 	unsigned int vcpu_idx;
-	u64 host_tod, gtod;
-	int r;
+	u64 gtod;
 
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	r = store_tod_clock(&host_tod);
-	if (r)
-		return r;
-
 	mutex_lock(&kvm->lock);
 	preempt_disable();
-	kvm->arch.epoch = gtod - host_tod;
+	kvm->arch.epoch = gtod - get_tod_clock();
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
@@ -581,15 +576,10 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 host_tod, gtod;
-	int r;
-
-	r = store_tod_clock(&host_tod);
-	if (r)
-		return r;
+	u64 gtod;
 
 	preempt_disable();
-	gtod = host_tod + kvm->arch.epoch;
+	gtod = get_tod_clock() + kvm->arch.epoch;
 	preempt_enable();
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;