author    | Paolo Bonzini <pbonzini@redhat.com> | 2018-10-13 12:00:26 +0200
committer | Paolo Bonzini <pbonzini@redhat.com> | 2018-10-13 12:00:26 +0200
commit    | 3d0d0d9b1d805e39456a9d49443d847092cb21ab (patch)
tree      | 6e99cda54e3db2f9ce039ac3d1269de2a81cc818 /arch/s390
parent    | Merge tag 'kvm-ppc-next-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent    | Merge branch 'apv11' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390... (diff)
download  | linux-3d0d0d9b1d805e39456a9d49443d847092cb21ab.tar.xz
          | linux-3d0d0d9b1d805e39456a9d49443d847092cb21ab.zip
Merge tag 'kvm-s390-next-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390/vfio-ap: Fixes and enhancements for vfio-ap
- add tracing
- fix a locking bug (see the sketch after this list)
- make local functions and data static
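
The locking bug is an early-return leak in kvm_s390_vm_set_crypto(): the function takes kvm->lock ahead of its switch statement, but each "if (!test_kvm_facility(kvm, 76))" check returned -EINVAL with the mutex still held. Below is a minimal sketch of the bug class and its fix, using a hypothetical helper name and a goto-based unlock in place of the open-coded mutex_unlock() calls the patch actually adds:

/*
 * Sketch only: set_crypto_attr_sketch() is a hypothetical stand-in for
 * kvm_s390_vm_set_crypto(). Once mutex_lock() has succeeded, every exit
 * path must drop kvm->lock again; the old error path forgot to.
 */
static int set_crypto_attr_sketch(struct kvm *kvm)
{
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (!test_kvm_facility(kvm, 76)) {
                ret = -EINVAL;  /* was "return -EINVAL;", leaking the lock */
                goto out_unlock;
        }
        /* ... wrapping-key mask setup elided ... */
out_unlock:
        mutex_unlock(&kvm->lock);
        return ret;
}

The patch itself keeps the existing switch structure and instead open-codes mutex_unlock(&kvm->lock) before each early return, as the diff below shows.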
Diffstat (limited to 'arch/s390')

-rw-r--r-- | arch/s390/include/asm/kvm_host.h |  2
-rw-r--r-- | arch/s390/kvm/kvm-s390.c         | 57

2 files changed, 55 insertions(+), 4 deletions(-)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index febd1709472a..d5d24889c3bc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -865,6 +865,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                  struct kvm_async_pf *work);
 
 void kvm_arch_crypto_clear_masks(struct kvm *kvm);
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+                               unsigned long *aqm, unsigned long *adm);
 
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b7964d77c6cd..fe24150ff666 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -859,8 +859,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
         mutex_lock(&kvm->lock);
         switch (attr->attr) {
         case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
-                if (!test_kvm_facility(kvm, 76))
+                if (!test_kvm_facility(kvm, 76)) {
+                        mutex_unlock(&kvm->lock);
                         return -EINVAL;
+                }
                 get_random_bytes(
                         kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
@@ -868,8 +870,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
                 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
                 break;
         case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
-                if (!test_kvm_facility(kvm, 76))
+                if (!test_kvm_facility(kvm, 76)) {
+                        mutex_unlock(&kvm->lock);
                         return -EINVAL;
+                }
                 get_random_bytes(
                         kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -877,16 +881,20 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
                 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
                 break;
         case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
-                if (!test_kvm_facility(kvm, 76))
+                if (!test_kvm_facility(kvm, 76)) {
+                        mutex_unlock(&kvm->lock);
                         return -EINVAL;
+                }
                 kvm->arch.crypto.aes_kw = 0;
                 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
                 break;
         case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
-                if (!test_kvm_facility(kvm, 76))
+                if (!test_kvm_facility(kvm, 76)) {
+                        mutex_unlock(&kvm->lock);
                         return -EINVAL;
+                }
                 kvm->arch.crypto.dea_kw = 0;
                 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -2056,6 +2064,46 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm)
                 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+                               unsigned long *aqm, unsigned long *adm)
+{
+        struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+        mutex_lock(&kvm->lock);
+        kvm_s390_vcpu_block_all(kvm);
+
+        switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+        case CRYCB_FORMAT2: /* APCB1 use 256 bits */
+                memcpy(crycb->apcb1.apm, apm, 32);
+                VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
+                         apm[0], apm[1], apm[2], apm[3]);
+                memcpy(crycb->apcb1.aqm, aqm, 32);
+                VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
+                         aqm[0], aqm[1], aqm[2], aqm[3]);
+                memcpy(crycb->apcb1.adm, adm, 32);
+                VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
+                         adm[0], adm[1], adm[2], adm[3]);
+                break;
+        case CRYCB_FORMAT1:
+        case CRYCB_FORMAT0: /* Fall through both use APCB0 */
+                memcpy(crycb->apcb0.apm, apm, 8);
+                memcpy(crycb->apcb0.aqm, aqm, 2);
+                memcpy(crycb->apcb0.adm, adm, 2);
+                VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
+                         apm[0], *((unsigned short *)aqm),
+                         *((unsigned short *)adm));
+                break;
+        default:        /* Can not happen */
+                break;
+        }
+
+        /* recreate the shadow crycb for each vcpu */
+        kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+        kvm_s390_vcpu_unblock_all(kvm);
+        mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
+
 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
 {
         mutex_lock(&kvm->lock);
@@ -2066,6 +2114,7 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm)
         memset(&kvm->arch.crypto.crycb->apcb1, 0,
                sizeof(kvm->arch.crypto.crycb->apcb1));
 
+        VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
         /* recreate the shadow crycb for each vcpu */
         kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
         kvm_s390_vcpu_unblock_all(kvm);