author | Paolo Bonzini <pbonzini@redhat.com> | 2015-05-11 14:06:32 +0200
committer | Paolo Bonzini <pbonzini@redhat.com> | 2015-05-11 14:06:32 +0200
commit | cba3d2765e9fcbce45fa3930c4d5f4da7652e752 (patch)
tree | 194fbda3ddde55ec0ebbbf5ed85f82fd9a8bf967 /arch/s390/kvm/kvm-s390.c
parent | KVM: MMU: fix CR4.SMEP=1, CR0.WP=0 with shadow pages (diff)
parent | KVM: s390: drop handling of interception code 12 (diff)
download | linux-cba3d2765e9fcbce45fa3930c4d5f4da7652e752.tar.xz linux-cba3d2765e9fcbce45fa3930c4d5f4da7652e752.zip
Merge tag 'kvm-s390-next-20150508' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: Fixes and features for 4.2 (kvm/next)
Mostly a bunch of fixes, reworks and optimizations for s390.
There is one new feature (EDAT-2 inside the guest), which boils
down to 2GB pages.
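One of the reworks in this pull is how updates that must reach every vCPU are applied: rather than looping over the vCPUs and calling exit_sie() on each one, the TOD hunk below brackets the update with kvm_s390_vcpu_block_all()/kvm_s390_vcpu_unblock_all(). The following is a minimal sketch of that caller-side pattern, assuming it sits in kvm-s390.c where these symbols are visible; the wrapper example_set_epoch() and its epoch parameter are hypothetical and only illustrate the shape of the code in the diff:

	/* Hypothetical helper: propagate a new epoch to every vCPU while the
	 * vCPUs are blocked from (re-)entering SIE, mirroring the pattern
	 * used by kvm_s390_set_tod_low() in the diff below. */
	static void example_set_epoch(struct kvm *kvm, u64 epoch)
	{
		struct kvm_vcpu *vcpu;
		int i;

		mutex_lock(&kvm->lock);
		kvm->arch.epoch = epoch;
		kvm_s390_vcpu_block_all(kvm);	/* keep vCPUs out of SIE */
		kvm_for_each_vcpu(i, vcpu, kvm)
			vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		kvm_s390_vcpu_unblock_all(kvm);	/* allow SIE re-entry again */
		mutex_unlock(&kvm->lock);
	}

The same series also folds the "queue a request, then kick the vCPU out of SIE" sequence into kvm_s390_sync_request(req, vcpu), which the gmap-notifier and IBS hunks below switch over to.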
Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 51
1 file changed, 33 insertions(+), 18 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2be391bb8557..d461f8a15c07 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -110,7 +110,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 /* upper facilities limit for kvm */
 unsigned long kvm_s390_fac_list_mask[] = {
 	0xffe6fffbfcfdfc40UL,
-	0x005c800000000000UL,
+	0x005e800000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
@@ -454,10 +454,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	mutex_lock(&kvm->lock);
 	kvm->arch.epoch = gtod - host_tod;
-	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-		exit_sie(cur_vcpu);
-	}
+	kvm_s390_vcpu_unblock_all(kvm);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
@@ -1311,8 +1311,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
 						    CPUSTAT_SM |
-						    CPUSTAT_STOPPED |
-						    CPUSTAT_GED);
+						    CPUSTAT_STOPPED);
+
+	if (test_kvm_facility(vcpu->kvm, 78))
+		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+	else if (test_kvm_facility(vcpu->kvm, 8))
+		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+
 	kvm_s390_vcpu_setup_model(vcpu);
 
 	vcpu->arch.sie_block->ecb = 6;
@@ -1409,16 +1414,26 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
+static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
+static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+{
+	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
 /*
  * Kick a guest cpu out of SIE and wait until SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
@@ -1430,10 +1445,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
 		cpu_relax();
 }
 
-/* Kick a guest cpu out of SIE and prevent SIE-reentry */
-void exit_sie_sync(struct kvm_vcpu *vcpu)
+/* Kick a guest cpu out of SIE to process a request synchronously */
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-	s390_vcpu_block(vcpu);
+	kvm_make_request(req, vcpu);
+	kvm_s390_vcpu_request(vcpu);
 	exit_sie(vcpu);
 }
 
@@ -1447,8 +1463,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
 		/* match against both prefix pages */
 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
-			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
-			exit_sie_sync(vcpu);
+			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 		}
 	}
 }
@@ -1720,8 +1735,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+	if (!vcpu->requests)
+		return 0;
 retry:
-	s390_vcpu_unblock(vcpu);
+	kvm_s390_vcpu_request_handled(vcpu);
 	/*
 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -2208,8 +2225,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 }
 
 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2225,8 +2241,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
 }
 
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)