summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJanosch Frank <frankja@linux.ibm.com>2024-02-20 09:56:34 +0100
committerHeiko Carstens <hca@linux.ibm.com>2024-02-21 15:09:13 +0100
commit4a5993287467d2d0401503256dc9d2690c7f2020 (patch)
treeb0cfbebe89c5e2b9c571dd203b8098f7b1327598
parents390: compile relocatable kernel without -fPIE (diff)
downloadlinux-4a5993287467d2d0401503256dc9d2690c7f2020.tar.xz
linux-4a5993287467d2d0401503256dc9d2690c7f2020.zip
KVM: s390: introduce kvm_s390_fpu_(store|load)
It's a bit nicer than having multiple lines and will help if there's another re-work since we'll only have to change one location.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
-rw-r--r--arch/s390/kvm/interrupt.c6
-rw-r--r--arch/s390/kvm/kvm-s390.c18
-rw-r--r--arch/s390/kvm/kvm-s390.h18
3 files changed, 22 insertions(+), 20 deletions(-)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c81708acd1f4..dc721d50a942 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -584,11 +584,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
mci.val = mchk->mcic;
/* take care of lazy register loading */
- fpu_stfpc(&vcpu->run->s.regs.fpc);
- if (cpu_has_vx())
- save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
- else
- save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+ kvm_s390_fpu_store(vcpu->run);
save_access_regs(vcpu->run->s.regs.acrs);
if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8c222b0dfbf2..6500f80a7086 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4949,11 +4949,7 @@ static void sync_regs(struct kvm_vcpu *vcpu)
}
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
- fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
- if (cpu_has_vx())
- load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
- else
- load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+ kvm_s390_fpu_load(vcpu->run);
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
sync_regs_fmt2(vcpu);
@@ -5014,11 +5010,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
- fpu_stfpc(&vcpu->run->s.regs.fpc);
- if (cpu_has_vx())
- save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
- else
- save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+ kvm_s390_fpu_store(vcpu->run);
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
store_regs_fmt2(vcpu);
}
@@ -5167,11 +5159,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
* switch in the run ioctl. Let's update our copies before we save
* it into the save area
*/
- fpu_stfpc(&vcpu->run->s.regs.fpc);
- if (cpu_has_vx())
- save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
- else
- save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+ kvm_s390_fpu_store(vcpu->run);
save_access_regs(vcpu->run->s.regs.acrs);
return kvm_s390_store_status_unloaded(vcpu, addr);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a7ea80cfa445..111eb5c74784 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -20,6 +20,24 @@
#include <asm/processor.h>
#include <asm/sclp.h>
+static inline void kvm_s390_fpu_store(struct kvm_run *run)
+{
+ fpu_stfpc(&run->s.regs.fpc);
+ if (cpu_has_vx())
+ save_vx_regs((__vector128 *)&run->s.regs.vrs);
+ else
+ save_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
+static inline void kvm_s390_fpu_load(struct kvm_run *run)
+{
+ fpu_lfpc_safe(&run->s.regs.fpc);
+ if (cpu_has_vx())
+ load_vx_regs((__vector128 *)&run->s.regs.vrs);
+ else
+ load_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1 1