Diffstat (limited to 'arch/mips/kvm')
-rw-r--r-- | arch/mips/kvm/Makefile    |   8
-rw-r--r-- | arch/mips/kvm/emulate.c   | 332
-rw-r--r-- | arch/mips/kvm/fpu.S       | 122
-rw-r--r-- | arch/mips/kvm/locore.S    |  38
-rw-r--r-- | arch/mips/kvm/mips.c      | 472
-rw-r--r-- | arch/mips/kvm/msa.S       | 161
-rw-r--r-- | arch/mips/kvm/stats.c     |   4
-rw-r--r-- | arch/mips/kvm/tlb.c       |   6
-rw-r--r-- | arch/mips/kvm/trap_emul.c | 199
9 files changed, 1318 insertions, 24 deletions
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 401fe027c261..637ebbebd549 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -1,13 +1,15 @@ # Makefile for KVM support for MIPS # -common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) +common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm -kvm-objs := $(common-objs) mips.o emulate.o locore.o \ +common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o + +kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \ interrupt.o stats.o commpage.o \ - dyntrans.o trap_emul.o + dyntrans.o trap_emul.o fpu.o obj-$(CONFIG_KVM) += kvm.o obj-y += callback.o tlb.o diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index fb3e8dfd1ff6..6230f376a44e 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) return EMULATE_DONE; } +/** + * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config1 CP0 + * register, by userland (currently read-only to the guest). + */ +unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) +{ + unsigned int mask = 0; + + /* Permit FPU to be present if FPU is supported */ + if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) + mask |= MIPS_CONF1_FP; + + return mask; +} + +/** + * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config3 CP0 + * register, by userland (currently read-only to the guest). + */ +unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) +{ + /* Config4 is optional */ + unsigned int mask = MIPS_CONF_M; + + /* Permit MSA to be present if MSA is supported */ + if (kvm_mips_guest_can_have_msa(&vcpu->arch)) + mask |= MIPS_CONF3_MSA; + + return mask; +} + +/** + * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config4 CP0 + * register, by userland (currently read-only to the guest). + */ +unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) +{ + /* Config5 is optional */ + return MIPS_CONF_M; +} + +/** + * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config5 CP0 + * register, by the guest itself. + */ +unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) +{ + unsigned int mask = 0; + + /* Permit MSAEn changes if MSA supported and enabled */ + if (kvm_mips_guest_has_msa(&vcpu->arch)) + mask |= MIPS_CONF5_MSAEN; + + /* + * Permit guest FPU mode changes if FPU is enabled and the relevant + * feature exists according to FIR register. 
+ */ + if (kvm_mips_guest_has_fpu(&vcpu->arch)) { + if (cpu_has_fre) + mask |= MIPS_CONF5_FRE; + /* We don't support UFR or UFE */ + } + + return mask; +} + enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, struct kvm_run *run, struct kvm_vcpu *vcpu) @@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, kvm_mips_write_compare(vcpu, vcpu->arch.gprs[rt]); } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { - kvm_write_c0_guest_status(cop0, - vcpu->arch.gprs[rt]); + unsigned int old_val, val, change; + + old_val = kvm_read_c0_guest_status(cop0); + val = vcpu->arch.gprs[rt]; + change = val ^ old_val; + + /* Make sure that the NMI bit is never set */ + val &= ~ST0_NMI; + + /* + * Don't allow CU1 or FR to be set unless FPU + * capability enabled and exists in guest + * configuration. + */ + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + val &= ~(ST0_CU1 | ST0_FR); + + /* + * Also don't allow FR to be set if host doesn't + * support it. + */ + if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + val &= ~ST0_FR; + + + /* Handle changes in FPU mode */ + preempt_disable(); + + /* + * FPU and Vector register state is made + * UNPREDICTABLE by a change of FR, so don't + * even bother saving it. + */ + if (change & ST0_FR) + kvm_drop_fpu(vcpu); + + /* + * If MSA state is already live, it is undefined + * how it interacts with FR=0 FPU state, and we + * don't want to hit reserved instruction + * exceptions trying to save the MSA state later + * when CU=1 && FR=1, so play it safe and save + * it first. + */ + if (change & ST0_CU1 && !(val & ST0_FR) && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + kvm_lose_fpu(vcpu); + /* - * Make sure that CU1 and NMI bits are - * never set + * Propagate CU1 (FPU enable) changes + * immediately if the FPU context is already + * loaded. When disabling we leave the context + * loaded so it can be quickly enabled again in + * the near future. */ - kvm_clear_c0_guest_status(cop0, - (ST0_CU1 | ST0_NMI)); + if (change & ST0_CU1 && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + change_c0_status(ST0_CU1, val); + + preempt_enable(); + + kvm_write_c0_guest_status(cop0, val); #ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mtc0(inst, opc, vcpu); + /* + * If FPU present, we need CU1/FR bits to take + * effect fairly soon. + */ + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + kvm_mips_trans_mtc0(inst, opc, vcpu); #endif + } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { + unsigned int old_val, val, change, wrmask; + + old_val = kvm_read_c0_guest_config5(cop0); + val = vcpu->arch.gprs[rt]; + + /* Only a few bits are writable in Config5 */ + wrmask = kvm_mips_config5_wrmask(vcpu); + change = (val ^ old_val) & wrmask; + val = old_val ^ change; + + + /* Handle changes in FPU/MSA modes */ + preempt_disable(); + + /* + * Propagate FRE changes immediately if the FPU + * context is already loaded. + */ + if (change & MIPS_CONF5_FRE && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + change_c0_config5(MIPS_CONF5_FRE, val); + + /* + * Propagate MSAEn changes immediately if the + * MSA context is already loaded. When disabling + * we leave the context loaded so it can be + * quickly enabled again in the near future. 
+ */ + if (change & MIPS_CONF5_MSAEN && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + change_c0_config5(MIPS_CONF5_MSAEN, + val); + + preempt_enable(); + + kvm_write_c0_guest_config5(cop0, val); } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { uint32_t old_cause, new_cause; @@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, return er; } +enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TRAP << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver TRAP when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_MSAFPE << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_FPE << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver FPE when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, 
CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_MSADIS << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver MSADIS when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + /* ll/sc, rdhwr, sync emulation */ #define OPCODE 0xfc000000 @@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause, case T_SYSCALL: case T_BREAK: case T_RES_INST: + case T_TRAP: + case T_MSAFPE: + case T_FPE: + case T_MSADIS: break; case T_COP_UNUSABLE: diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S new file mode 100644 index 000000000000..531fbf5131c0 --- /dev/null +++ b/arch/mips/kvm/fpu.S @@ -0,0 +1,122 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * FPU context handling code for KVM. + * + * Copyright (C) 2015 Imagination Technologies Ltd. + */ + +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> + + .set noreorder + .set noat + +LEAF(__kvm_save_fpu) + .set push + .set mips64r2 + SET_HARDFLOAT + mfc0 t0, CP0_STATUS + sll t0, t0, 5 # is Status.FR set? + bgez t0, 1f # no: skip odd doubles + nop + sdc1 $f1, VCPU_FPR1(a0) + sdc1 $f3, VCPU_FPR3(a0) + sdc1 $f5, VCPU_FPR5(a0) + sdc1 $f7, VCPU_FPR7(a0) + sdc1 $f9, VCPU_FPR9(a0) + sdc1 $f11, VCPU_FPR11(a0) + sdc1 $f13, VCPU_FPR13(a0) + sdc1 $f15, VCPU_FPR15(a0) + sdc1 $f17, VCPU_FPR17(a0) + sdc1 $f19, VCPU_FPR19(a0) + sdc1 $f21, VCPU_FPR21(a0) + sdc1 $f23, VCPU_FPR23(a0) + sdc1 $f25, VCPU_FPR25(a0) + sdc1 $f27, VCPU_FPR27(a0) + sdc1 $f29, VCPU_FPR29(a0) + sdc1 $f31, VCPU_FPR31(a0) +1: sdc1 $f0, VCPU_FPR0(a0) + sdc1 $f2, VCPU_FPR2(a0) + sdc1 $f4, VCPU_FPR4(a0) + sdc1 $f6, VCPU_FPR6(a0) + sdc1 $f8, VCPU_FPR8(a0) + sdc1 $f10, VCPU_FPR10(a0) + sdc1 $f12, VCPU_FPR12(a0) + sdc1 $f14, VCPU_FPR14(a0) + sdc1 $f16, VCPU_FPR16(a0) + sdc1 $f18, VCPU_FPR18(a0) + sdc1 $f20, VCPU_FPR20(a0) + sdc1 $f22, VCPU_FPR22(a0) + sdc1 $f24, VCPU_FPR24(a0) + sdc1 $f26, VCPU_FPR26(a0) + sdc1 $f28, VCPU_FPR28(a0) + jr ra + sdc1 $f30, VCPU_FPR30(a0) + .set pop + END(__kvm_save_fpu) + +LEAF(__kvm_restore_fpu) + .set push + .set mips64r2 + SET_HARDFLOAT + mfc0 t0, CP0_STATUS + sll t0, t0, 5 # is Status.FR set? 
+ bgez t0, 1f # no: skip odd doubles + nop + ldc1 $f1, VCPU_FPR1(a0) + ldc1 $f3, VCPU_FPR3(a0) + ldc1 $f5, VCPU_FPR5(a0) + ldc1 $f7, VCPU_FPR7(a0) + ldc1 $f9, VCPU_FPR9(a0) + ldc1 $f11, VCPU_FPR11(a0) + ldc1 $f13, VCPU_FPR13(a0) + ldc1 $f15, VCPU_FPR15(a0) + ldc1 $f17, VCPU_FPR17(a0) + ldc1 $f19, VCPU_FPR19(a0) + ldc1 $f21, VCPU_FPR21(a0) + ldc1 $f23, VCPU_FPR23(a0) + ldc1 $f25, VCPU_FPR25(a0) + ldc1 $f27, VCPU_FPR27(a0) + ldc1 $f29, VCPU_FPR29(a0) + ldc1 $f31, VCPU_FPR31(a0) +1: ldc1 $f0, VCPU_FPR0(a0) + ldc1 $f2, VCPU_FPR2(a0) + ldc1 $f4, VCPU_FPR4(a0) + ldc1 $f6, VCPU_FPR6(a0) + ldc1 $f8, VCPU_FPR8(a0) + ldc1 $f10, VCPU_FPR10(a0) + ldc1 $f12, VCPU_FPR12(a0) + ldc1 $f14, VCPU_FPR14(a0) + ldc1 $f16, VCPU_FPR16(a0) + ldc1 $f18, VCPU_FPR18(a0) + ldc1 $f20, VCPU_FPR20(a0) + ldc1 $f22, VCPU_FPR22(a0) + ldc1 $f24, VCPU_FPR24(a0) + ldc1 $f26, VCPU_FPR26(a0) + ldc1 $f28, VCPU_FPR28(a0) + jr ra + ldc1 $f30, VCPU_FPR30(a0) + .set pop + END(__kvm_restore_fpu) + +LEAF(__kvm_restore_fcsr) + .set push + SET_HARDFLOAT + lw t0, VCPU_FCR31(a0) + /* + * The ctc1 must stay at this offset in __kvm_restore_fcsr. + * See kvm_mips_csr_die_notify() which handles t0 containing a value + * which triggers an FP Exception, which must be stepped over and + * ignored since the set cause bits must remain there for the guest. + */ + ctc1 t0, fcr31 + jr ra + nop + .set pop + END(__kvm_restore_fcsr) diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 4a68b176d6e4..c567240386a0 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S @@ -36,6 +36,8 @@ #define PT_HOST_USERLOCAL PT_EPC #define CP0_DDATA_LO $28,3 +#define CP0_CONFIG3 $16,3 +#define CP0_CONFIG5 $16,5 #define CP0_EBASE $15,1 #define CP0_INTCTL $12,1 @@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) LONG_L k0, VCPU_HOST_EBASE(k1) mtc0 k0,CP0_EBASE + /* + * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't + * trigger FPE for pending exceptions. + */ + .set at + and v1, v0, ST0_CU1 + beqz v1, 1f + nop + .set push + SET_HARDFLOAT + cfc1 t0, fcr31 + sw t0, VCPU_FCR31(k1) + ctc1 zero,fcr31 + .set pop + .set noat +1: + +#ifdef CONFIG_CPU_HAS_MSA + /* + * If MSA is enabled, save MSACSR and clear it so that later + * instructions don't trigger MSAFPE for pending exceptions. 
+ */ + mfc0 t0, CP0_CONFIG3 + ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */ + beqz t0, 1f + nop + mfc0 t0, CP0_CONFIG5 + ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */ + beqz t0, 1f + nop + _cfcmsa t0, MSA_CSR + sw t0, VCPU_MSA_CSR(k1) + _ctcmsa MSA_CSR, zero +1: +#endif + /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ .set at and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index c9eccf5df912..bb68e8d520e8 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/err.h> +#include <linux/kdebug.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> @@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, + { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU }, + { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU }, + { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU }, + { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, @@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_CP0_STATUS, KVM_REG_MIPS_CP0_CAUSE, KVM_REG_MIPS_CP0_EPC, + KVM_REG_MIPS_CP0_PRID, KVM_REG_MIPS_CP0_CONFIG, KVM_REG_MIPS_CP0_CONFIG1, KVM_REG_MIPS_CP0_CONFIG2, KVM_REG_MIPS_CP0_CONFIG3, + KVM_REG_MIPS_CP0_CONFIG4, + KVM_REG_MIPS_CP0_CONFIG5, KVM_REG_MIPS_CP0_CONFIG7, KVM_REG_MIPS_CP0_ERROREPC, @@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; + struct mips_fpu_struct *fpu = &vcpu->arch.fpu; int ret; s64 v; + s64 vs[2]; + unsigned int idx; switch (reg->id) { + /* General purpose registers */ case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; break; @@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, v = (long)vcpu->arch.pc; break; + /* Floating point registers */ + case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_32(0); + /* Odd singles in top of even double when FR=0 */ + if (kvm_read_c0_guest_status(cop0) & ST0_FR) + v = get_fpr32(&fpu->fpr[idx], 0); + else + v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); + break; + case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_64(0); + /* Can't access odd doubles in FR=0 mode */ + if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) + return -EINVAL; + v = get_fpr64(&fpu->fpr[idx], 0); + break; + case KVM_REG_MIPS_FCR_IR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + v = boot_cpu_data.fpu_id; + break; + case KVM_REG_MIPS_FCR_CSR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + v = fpu->fcr31; + break; + + /* MIPS SIMD Architecture (MSA) registers */ + case KVM_REG_MIPS_VEC_128(0) ... 
KVM_REG_MIPS_VEC_128(31): + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + /* Can't access MSA registers in FR=0 mode */ + if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_VEC_128(0); +#ifdef CONFIG_CPU_LITTLE_ENDIAN + /* least significant byte first */ + vs[0] = get_fpr64(&fpu->fpr[idx], 0); + vs[1] = get_fpr64(&fpu->fpr[idx], 1); +#else + /* most significant byte first */ + vs[0] = get_fpr64(&fpu->fpr[idx], 1); + vs[1] = get_fpr64(&fpu->fpr[idx], 0); +#endif + break; + case KVM_REG_MIPS_MSA_IR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + v = boot_cpu_data.msa_id; + break; + case KVM_REG_MIPS_MSA_CSR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + v = fpu->msacsr; + break; + + /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: v = (long)kvm_read_c0_guest_index(cop0); break; @@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_EPC: v = (long)kvm_read_c0_guest_epc(cop0); break; - case KVM_REG_MIPS_CP0_ERROREPC: - v = (long)kvm_read_c0_guest_errorepc(cop0); + case KVM_REG_MIPS_CP0_PRID: + v = (long)kvm_read_c0_guest_prid(cop0); break; case KVM_REG_MIPS_CP0_CONFIG: v = (long)kvm_read_c0_guest_config(cop0); @@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_CONFIG3: v = (long)kvm_read_c0_guest_config3(cop0); break; + case KVM_REG_MIPS_CP0_CONFIG4: + v = (long)kvm_read_c0_guest_config4(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG5: + v = (long)kvm_read_c0_guest_config5(cop0); + break; case KVM_REG_MIPS_CP0_CONFIG7: v = (long)kvm_read_c0_guest_config7(cop0); break; + case KVM_REG_MIPS_CP0_ERROREPC: + v = (long)kvm_read_c0_guest_errorepc(cop0); + break; /* registers to be handled specially */ case KVM_REG_MIPS_CP0_COUNT: case KVM_REG_MIPS_COUNT_CTL: @@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, u32 v32 = (u32)v; return put_user(v32, uaddr32); + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + + return copy_to_user(uaddr, vs, 16); } else { return -EINVAL; } @@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; - u64 v; + struct mips_fpu_struct *fpu = &vcpu->arch.fpu; + s64 v; + s64 vs[2]; + unsigned int idx; if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; @@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, if (get_user(v32, uaddr32) != 0) return -EFAULT; v = (s64)v32; + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + + return copy_from_user(vs, uaddr, 16); } else { return -EINVAL; } switch (reg->id) { + /* General purpose registers */ case KVM_REG_MIPS_R0: /* Silently ignore requests to set $0 */ break; @@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, vcpu->arch.pc = v; break; + /* Floating point registers */ + case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_32(0); + /* Odd singles in top of even double when FR=0 */ + if (kvm_read_c0_guest_status(cop0) & ST0_FR) + set_fpr32(&fpu->fpr[idx], 0, v); + else + set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); + break; + case KVM_REG_MIPS_FPR_64(0) ... 
KVM_REG_MIPS_FPR_64(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_64(0); + /* Can't access odd doubles in FR=0 mode */ + if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) + return -EINVAL; + set_fpr64(&fpu->fpr[idx], 0, v); + break; + case KVM_REG_MIPS_FCR_IR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + /* Read-only */ + break; + case KVM_REG_MIPS_FCR_CSR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + fpu->fcr31 = v; + break; + + /* MIPS SIMD Architecture (MSA) registers */ + case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_VEC_128(0); +#ifdef CONFIG_CPU_LITTLE_ENDIAN + /* least significant byte first */ + set_fpr64(&fpu->fpr[idx], 0, vs[0]); + set_fpr64(&fpu->fpr[idx], 1, vs[1]); +#else + /* most significant byte first */ + set_fpr64(&fpu->fpr[idx], 1, vs[0]); + set_fpr64(&fpu->fpr[idx], 0, vs[1]); +#endif + break; + case KVM_REG_MIPS_MSA_IR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + /* Read-only */ + break; + case KVM_REG_MIPS_MSA_CSR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + fpu->msacsr = v; + break; + + /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: kvm_write_c0_guest_index(cop0, v); break; @@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_EPC: kvm_write_c0_guest_epc(cop0, v); break; + case KVM_REG_MIPS_CP0_PRID: + kvm_write_c0_guest_prid(cop0, v); + break; case KVM_REG_MIPS_CP0_ERROREPC: kvm_write_c0_guest_errorepc(cop0, v); break; @@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_COUNT: case KVM_REG_MIPS_CP0_COMPARE: case KVM_REG_MIPS_CP0_CAUSE: + case KVM_REG_MIPS_CP0_CONFIG: + case KVM_REG_MIPS_CP0_CONFIG1: + case KVM_REG_MIPS_CP0_CONFIG2: + case KVM_REG_MIPS_CP0_CONFIG3: + case KVM_REG_MIPS_CP0_CONFIG4: + case KVM_REG_MIPS_CP0_CONFIG5: case KVM_REG_MIPS_COUNT_CTL: case KVM_REG_MIPS_COUNT_RESUME: case KVM_REG_MIPS_COUNT_HZ: @@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, return 0; } +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + int r = 0; + + if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) + return -EINVAL; + if (cap->flags) + return -EINVAL; + if (cap->args[0]) + return -EINVAL; + + switch (cap->cap) { + case KVM_CAP_MIPS_FPU: + vcpu->arch.fpu_enabled = true; + break; + case KVM_CAP_MIPS_MSA: + vcpu->arch.msa_enabled = true; + break; + default: + r = -EINVAL; + break; + } + + return r; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } + case KVM_ENABLE_CAP: { + struct kvm_enable_cap cap; + + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + goto out; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } default: r = -ENOIOCTLCMD; } @@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) switch (ext) { case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; + case KVM_CAP_MIPS_FPU: + r = !!cpu_has_fpu; + break; + case KVM_CAP_MIPS_MSA: + /* + * We don't support MSA vector partitioning yet: + * 1) It would require explicit support which can't be tested + * yet due 
to lack of support in current hardware. + * 2) It extends the state that would need to be saved/restored + * by e.g. QEMU for migration. + * + * When vector partitioning hardware becomes available, support + * could be added by requiring a flag when enabling + * KVM_CAP_MIPS_MSA capability to indicate that userland knows + * to save/restore the appropriate extra state. + */ + r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); + break; default: r = 0; break; @@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ret = kvm_mips_callbacks->handle_break(vcpu); break; + case T_TRAP: + ++vcpu->stat.trap_inst_exits; + trace_kvm_exit(vcpu, TRAP_INST_EXITS); + ret = kvm_mips_callbacks->handle_trap(vcpu); + break; + + case T_MSAFPE: + ++vcpu->stat.msa_fpe_exits; + trace_kvm_exit(vcpu, MSA_FPE_EXITS); + ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); + break; + + case T_FPE: + ++vcpu->stat.fpe_exits; + trace_kvm_exit(vcpu, FPE_EXITS); + ret = kvm_mips_callbacks->handle_fpe(vcpu); + break; + + case T_MSADIS: + ++vcpu->stat.msa_disabled_exits; + trace_kvm_exit(vcpu, MSA_DISABLED_EXITS); + ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); + break; + default: kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, @@ -1146,12 +1386,233 @@ skip_emul: } } + if (ret == RESUME_GUEST) { + /* + * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context + * is live), restore FCR31 / MSACSR. + * + * This should be before returning to the guest exception + * vector, as it may well cause an [MSA] FP exception if there + * are pending exception bits unmasked. (see + * kvm_mips_csr_die_notifier() for how that is handled). + */ + if (kvm_mips_guest_has_fpu(&vcpu->arch) && + read_c0_status() & ST0_CU1) + __kvm_restore_fcsr(&vcpu->arch); + + if (kvm_mips_guest_has_msa(&vcpu->arch) && + read_c0_config5() & MIPS_CONF5_MSAEN) + __kvm_restore_msacsr(&vcpu->arch); + } + /* Disable HTW before returning to guest or host */ htw_stop(); return ret; } +/* Enable FPU for guest and restore context */ +void kvm_own_fpu(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned int sr, cfg5; + + preempt_disable(); + + sr = kvm_read_c0_guest_status(cop0); + + /* + * If MSA state is already live, it is undefined how it interacts with + * FR=0 FPU state, and we don't want to hit reserved instruction + * exceptions trying to save the MSA state later when CU=1 && FR=1, so + * play it safe and save it first. + * + * In theory we shouldn't ever hit this case since kvm_lose_fpu() should + * get called when guest CU1 is set, however we can't trust the guest + * not to clobber the status register directly via the commpage. 
+ */ + if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + kvm_lose_fpu(vcpu); + + /* + * Enable FPU for guest + * We set FR and FRE according to guest context + */ + change_c0_status(ST0_CU1 | ST0_FR, sr); + if (cpu_has_fre) { + cfg5 = kvm_read_c0_guest_config5(cop0); + change_c0_config5(MIPS_CONF5_FRE, cfg5); + } + enable_fpu_hazard(); + + /* If guest FPU state not active, restore it now */ + if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { + __kvm_restore_fpu(&vcpu->arch); + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; + } + + preempt_enable(); +} + +#ifdef CONFIG_CPU_HAS_MSA +/* Enable MSA for guest and restore context */ +void kvm_own_msa(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned int sr, cfg5; + + preempt_disable(); + + /* + * Enable FPU if enabled in guest, since we're restoring FPU context + * anyway. We set FR and FRE according to guest context. + */ + if (kvm_mips_guest_has_fpu(&vcpu->arch)) { + sr = kvm_read_c0_guest_status(cop0); + + /* + * If FR=0 FPU state is already live, it is undefined how it + * interacts with MSA state, so play it safe and save it first. + */ + if (!(sr & ST0_FR) && + (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | + KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU) + kvm_lose_fpu(vcpu); + + change_c0_status(ST0_CU1 | ST0_FR, sr); + if (sr & ST0_CU1 && cpu_has_fre) { + cfg5 = kvm_read_c0_guest_config5(cop0); + change_c0_config5(MIPS_CONF5_FRE, cfg5); + } + } + + /* Enable MSA for guest */ + set_c0_config5(MIPS_CONF5_MSAEN); + enable_fpu_hazard(); + + switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { + case KVM_MIPS_FPU_FPU: + /* + * Guest FPU state already loaded, only restore upper MSA state + */ + __kvm_restore_msa_upper(&vcpu->arch); + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; + break; + case 0: + /* Neither FPU or MSA already active, restore full MSA state */ + __kvm_restore_msa(&vcpu->arch); + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; + if (kvm_mips_guest_has_fpu(&vcpu->arch)) + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; + break; + default: + break; + } + + preempt_enable(); +} +#endif + +/* Drop FPU & MSA without saving it */ +void kvm_drop_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { + disable_msa(); + vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; + } + if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { + clear_c0_status(ST0_CU1 | ST0_FR); + vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; + } + preempt_enable(); +} + +/* Save and disable FPU & MSA */ +void kvm_lose_fpu(struct kvm_vcpu *vcpu) +{ + /* + * FPU & MSA get disabled in root context (hardware) when it is disabled + * in guest context (software), but the register state in the hardware + * may still be in use. This is why we explicitly re-enable the hardware + * before saving. 
+ */ + + preempt_disable(); + if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { + set_c0_config5(MIPS_CONF5_MSAEN); + enable_fpu_hazard(); + + __kvm_save_msa(&vcpu->arch); + + /* Disable MSA & FPU */ + disable_msa(); + if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + clear_c0_status(ST0_CU1 | ST0_FR); + vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); + } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { + set_c0_status(ST0_CU1); + enable_fpu_hazard(); + + __kvm_save_fpu(&vcpu->arch); + vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; + + /* Disable FPU */ + clear_c0_status(ST0_CU1 | ST0_FR); + } + preempt_enable(); +} + +/* + * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are + * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP + * exception if cause bits are set in the value being written. + */ +static int kvm_mips_csr_die_notify(struct notifier_block *self, + unsigned long cmd, void *ptr) +{ + struct die_args *args = (struct die_args *)ptr; + struct pt_regs *regs = args->regs; + unsigned long pc; + + /* Only interested in FPE and MSAFPE */ + if (cmd != DIE_FP && cmd != DIE_MSAFP) + return NOTIFY_DONE; + + /* Return immediately if guest context isn't active */ + if (!(current->flags & PF_VCPU)) + return NOTIFY_DONE; + + /* Should never get here from user mode */ + BUG_ON(user_mode(regs)); + + pc = instruction_pointer(regs); + switch (cmd) { + case DIE_FP: + /* match 2nd instruction in __kvm_restore_fcsr */ + if (pc != (unsigned long)&__kvm_restore_fcsr + 4) + return NOTIFY_DONE; + break; + case DIE_MSAFP: + /* match 2nd/3rd instruction in __kvm_restore_msacsr */ + if (!cpu_has_msa || + pc < (unsigned long)&__kvm_restore_msacsr + 4 || + pc > (unsigned long)&__kvm_restore_msacsr + 8) + return NOTIFY_DONE; + break; + } + + /* Move PC forward a little and continue executing */ + instruction_pointer(regs) += 4; + + return NOTIFY_STOP; +} + +static struct notifier_block kvm_mips_csr_die_notifier = { + .notifier_call = kvm_mips_csr_die_notify, +}; + int __init kvm_mips_init(void) { int ret; @@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void) if (ret) return ret; + register_die_notifier(&kvm_mips_csr_die_notifier); + /* * On MIPS, kernel modules are executed from "mapped space", which * requires TLBs. The TLB handling code is statically linked with @@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void) kvm_mips_release_pfn_clean = kvm_release_pfn_clean; kvm_mips_is_error_pfn = is_error_pfn; - pr_info("KVM/MIPS Initialized\n"); return 0; } @@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void) kvm_mips_release_pfn_clean = NULL; kvm_mips_is_error_pfn = NULL; - pr_info("KVM/MIPS unloaded\n"); + unregister_die_notifier(&kvm_mips_csr_die_notifier); } module_init(kvm_mips_init); diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S new file mode 100644 index 000000000000..d02f0c6cc2cc --- /dev/null +++ b/arch/mips/kvm/msa.S @@ -0,0 +1,161 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * MIPS SIMD Architecture (MSA) context handling code for KVM. + * + * Copyright (C) 2015 Imagination Technologies Ltd. 
+ */ + +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> + + .set noreorder + .set noat + +LEAF(__kvm_save_msa) + st_d 0, VCPU_FPR0, a0 + st_d 1, VCPU_FPR1, a0 + st_d 2, VCPU_FPR2, a0 + st_d 3, VCPU_FPR3, a0 + st_d 4, VCPU_FPR4, a0 + st_d 5, VCPU_FPR5, a0 + st_d 6, VCPU_FPR6, a0 + st_d 7, VCPU_FPR7, a0 + st_d 8, VCPU_FPR8, a0 + st_d 9, VCPU_FPR9, a0 + st_d 10, VCPU_FPR10, a0 + st_d 11, VCPU_FPR11, a0 + st_d 12, VCPU_FPR12, a0 + st_d 13, VCPU_FPR13, a0 + st_d 14, VCPU_FPR14, a0 + st_d 15, VCPU_FPR15, a0 + st_d 16, VCPU_FPR16, a0 + st_d 17, VCPU_FPR17, a0 + st_d 18, VCPU_FPR18, a0 + st_d 19, VCPU_FPR19, a0 + st_d 20, VCPU_FPR20, a0 + st_d 21, VCPU_FPR21, a0 + st_d 22, VCPU_FPR22, a0 + st_d 23, VCPU_FPR23, a0 + st_d 24, VCPU_FPR24, a0 + st_d 25, VCPU_FPR25, a0 + st_d 26, VCPU_FPR26, a0 + st_d 27, VCPU_FPR27, a0 + st_d 28, VCPU_FPR28, a0 + st_d 29, VCPU_FPR29, a0 + st_d 30, VCPU_FPR30, a0 + st_d 31, VCPU_FPR31, a0 + jr ra + nop + END(__kvm_save_msa) + +LEAF(__kvm_restore_msa) + ld_d 0, VCPU_FPR0, a0 + ld_d 1, VCPU_FPR1, a0 + ld_d 2, VCPU_FPR2, a0 + ld_d 3, VCPU_FPR3, a0 + ld_d 4, VCPU_FPR4, a0 + ld_d 5, VCPU_FPR5, a0 + ld_d 6, VCPU_FPR6, a0 + ld_d 7, VCPU_FPR7, a0 + ld_d 8, VCPU_FPR8, a0 + ld_d 9, VCPU_FPR9, a0 + ld_d 10, VCPU_FPR10, a0 + ld_d 11, VCPU_FPR11, a0 + ld_d 12, VCPU_FPR12, a0 + ld_d 13, VCPU_FPR13, a0 + ld_d 14, VCPU_FPR14, a0 + ld_d 15, VCPU_FPR15, a0 + ld_d 16, VCPU_FPR16, a0 + ld_d 17, VCPU_FPR17, a0 + ld_d 18, VCPU_FPR18, a0 + ld_d 19, VCPU_FPR19, a0 + ld_d 20, VCPU_FPR20, a0 + ld_d 21, VCPU_FPR21, a0 + ld_d 22, VCPU_FPR22, a0 + ld_d 23, VCPU_FPR23, a0 + ld_d 24, VCPU_FPR24, a0 + ld_d 25, VCPU_FPR25, a0 + ld_d 26, VCPU_FPR26, a0 + ld_d 27, VCPU_FPR27, a0 + ld_d 28, VCPU_FPR28, a0 + ld_d 29, VCPU_FPR29, a0 + ld_d 30, VCPU_FPR30, a0 + ld_d 31, VCPU_FPR31, a0 + jr ra + nop + END(__kvm_restore_msa) + + .macro kvm_restore_msa_upper wr, off, base + .set push + .set noat +#ifdef CONFIG_64BIT + ld $1, \off(\base) + insert_d \wr, 1 +#elif defined(CONFIG_CPU_LITTLE_ENDIAN) + lw $1, \off(\base) + insert_w \wr, 2 + lw $1, (\off+4)(\base) + insert_w \wr, 3 +#else /* CONFIG_CPU_BIG_ENDIAN */ + lw $1, (\off+4)(\base) + insert_w \wr, 2 + lw $1, \off(\base) + insert_w \wr, 3 +#endif + .set pop + .endm + +LEAF(__kvm_restore_msa_upper) + kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0 + kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0 + kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0 + kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0 + kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0 + kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0 + kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0 + kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0 + kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0 + kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0 + kvm_restore_msa_upper 10, VCPU_FPR10+8, a0 + kvm_restore_msa_upper 11, VCPU_FPR11+8, a0 + kvm_restore_msa_upper 12, VCPU_FPR12+8, a0 + kvm_restore_msa_upper 13, VCPU_FPR13+8, a0 + kvm_restore_msa_upper 14, VCPU_FPR14+8, a0 + kvm_restore_msa_upper 15, VCPU_FPR15+8, a0 + kvm_restore_msa_upper 16, VCPU_FPR16+8, a0 + kvm_restore_msa_upper 17, VCPU_FPR17+8, a0 + kvm_restore_msa_upper 18, VCPU_FPR18+8, a0 + kvm_restore_msa_upper 19, VCPU_FPR19+8, a0 + kvm_restore_msa_upper 20, VCPU_FPR20+8, a0 + kvm_restore_msa_upper 21, VCPU_FPR21+8, a0 + kvm_restore_msa_upper 22, VCPU_FPR22+8, a0 + kvm_restore_msa_upper 23, VCPU_FPR23+8, a0 + kvm_restore_msa_upper 24, VCPU_FPR24+8, a0 + kvm_restore_msa_upper 25, VCPU_FPR25+8, a0 + kvm_restore_msa_upper 26, VCPU_FPR26+8, a0 + kvm_restore_msa_upper 
27, VCPU_FPR27+8, a0 + kvm_restore_msa_upper 28, VCPU_FPR28+8, a0 + kvm_restore_msa_upper 29, VCPU_FPR29+8, a0 + kvm_restore_msa_upper 30, VCPU_FPR30+8, a0 + kvm_restore_msa_upper 31, VCPU_FPR31+8, a0 + jr ra + nop + END(__kvm_restore_msa_upper) + +LEAF(__kvm_restore_msacsr) + lw t0, VCPU_MSA_CSR(a0) + /* + * The ctcmsa must stay at this offset in __kvm_restore_msacsr. + * See kvm_mips_csr_die_notify() which handles t0 containing a value + * which triggers an MSA FP Exception, which must be stepped over and + * ignored since the set cause bits must remain there for the guest. + */ + _ctcmsa MSA_CSR, t0 + jr ra + nop + END(__kvm_restore_msacsr) diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c index a74d6024c5ad..888bb67070ac 100644 --- a/arch/mips/kvm/stats.c +++ b/arch/mips/kvm/stats.c @@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = { "System Call", "Reserved Inst", "Break Inst", + "Trap Inst", + "MSA FPE", + "FPE", + "MSA Disabled", "D-Cache Flushes", }; diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index b6beb0e07b1b..aed0ac2a4972 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } } + /* restore guest state to registers */ + kvm_mips_callbacks->vcpu_set_regs(vcpu); + local_irq_restore(flags); } @@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->arch.preempt_entryhi = read_c0_entryhi(); vcpu->arch.last_sched_cpu = cpu; + /* save guest state in registers */ + kvm_mips_callbacks->vcpu_get_regs(vcpu); + if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & ASID_VERSION_MASK)) { kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index fd7257b70e65..d836ed5b0bc7 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) { + struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_run *run = vcpu->run; uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) - er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); - else + if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { + /* FPU Unusable */ + if (!kvm_mips_guest_has_fpu(&vcpu->arch) || + (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { + /* + * Unusable/no FPU in guest: + * deliver guest COP1 Unusable Exception + */ + er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); + } else { + /* Restore FPU state */ + kvm_own_fpu(vcpu); + er = EMULATE_DONE; + } + } else { er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + } switch (er) { case EMULATE_DONE: @@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) return ret; } +static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = 
vcpu->run; + uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +/** + * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use MSA when it is disabled. + */ +static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (!kvm_mips_guest_has_msa(&vcpu->arch) || + (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { + /* + * No MSA in guest, or FPU enabled and not in FR=1 mode, + * guest reserved instruction exception + */ + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); + } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { + /* MSA disabled by guest, guest MSA disabled exception */ + er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu); + } else { + /* Restore MSA/FPU state */ + kvm_own_msa(vcpu); + er = EMULATE_DONE; + } + + switch (er) { + case EMULATE_DONE: + ret = RESUME_GUEST; + break; + + case EMULATE_FAIL: + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + break; + + default: + BUG(); + } + return ret; +} + static int kvm_trap_emul_vm_init(struct kvm *kvm) { return 0; @@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) * guest will come up as expected, for now we simulate a MIPS 24kc */ kvm_write_c0_guest_prid(cop0, 0x00019300); - kvm_write_c0_guest_config(cop0, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + /* Have config1, Cacheable, noncoherent, write-back, write allocate */ + kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) | + (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT)); /* Read the cache characteristics from the host Config1 Register */ @@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) (1 << CP0C1_WR) | (1 << CP0C1_CA)); kvm_write_c0_guest_config1(cop0, config1); - kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); - /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ - kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) | - (1 << CP0C3_ULRI)); + /* Have config3, no tertiary/secondary caches implemented */ + kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); + /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ + + /* Have config4, UserLocal */ + kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); + + /* Have config5 */ + kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); + + /* No config6 */ + kvm_write_c0_guest_config5(cop0, 0); /* Set Wait IE/IXMT Ignore in 
Config7, IAR, AR */ kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); @@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, { struct mips_coproc *cop0 = vcpu->arch.cop0; int ret = 0; + unsigned int cur, change; switch (reg->id) { case KVM_REG_MIPS_CP0_COUNT: @@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, kvm_write_c0_guest_cause(cop0, v); } break; + case KVM_REG_MIPS_CP0_CONFIG: + /* read-only for now */ + break; + case KVM_REG_MIPS_CP0_CONFIG1: + cur = kvm_read_c0_guest_config1(cop0); + change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config1(cop0, v); + } + break; + case KVM_REG_MIPS_CP0_CONFIG2: + /* read-only for now */ + break; + case KVM_REG_MIPS_CP0_CONFIG3: + cur = kvm_read_c0_guest_config3(cop0); + change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config3(cop0, v); + } + break; + case KVM_REG_MIPS_CP0_CONFIG4: + cur = kvm_read_c0_guest_config4(cop0); + change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config4(cop0, v); + } + break; + case KVM_REG_MIPS_CP0_CONFIG5: + cur = kvm_read_c0_guest_config5(cop0); + change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config5(cop0, v); + } + break; case KVM_REG_MIPS_COUNT_CTL: ret = kvm_mips_set_count_ctl(vcpu, v); break; @@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, return ret; } +static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu) +{ + kvm_lose_fpu(vcpu); + + return 0; +} + +static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu) +{ + return 0; +} + static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { /* exit handlers */ .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, @@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .handle_syscall = kvm_trap_emul_handle_syscall, .handle_res_inst = kvm_trap_emul_handle_res_inst, .handle_break = kvm_trap_emul_handle_break, + .handle_trap = kvm_trap_emul_handle_trap, + .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, + .handle_fpe = kvm_trap_emul_handle_fpe, + .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, .vm_init = kvm_trap_emul_vm_init, .vcpu_init = kvm_trap_emul_vcpu_init, @@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .irq_clear = kvm_mips_irq_clear_cb, .get_one_reg = kvm_trap_emul_get_one_reg, .set_one_reg = kvm_trap_emul_set_one_reg, + .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, + .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, }; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) |
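
A minimal userland sketch (not part of the patch above) of how a VMM might probe and enable the two vcpu capabilities wired up by kvm_vcpu_ioctl_enable_cap() in mips.c. The vm_fd/vcpu_fd descriptors are assumed to come from the usual KVM_CREATE_VM / KVM_CREATE_VCPU ioctls, and the KVM_CAP_MIPS_FPU / KVM_CAP_MIPS_MSA constants from the matching uapi header update, which is outside this diffstat.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_mips_fpu_msa(int vm_fd, int vcpu_fd)
{
	struct kvm_enable_cap cap;

	/* FPU: kvm_vm_ioctl_check_extension() reports !!cpu_has_fpu */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU) > 0) {
		memset(&cap, 0, sizeof(cap));	/* flags and args[0] must be 0 */
		cap.cap = KVM_CAP_MIPS_FPU;
		if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
			return -1;
	}

	/* MSA: only reported when the host has no MSA vector partitioning */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_MSA) > 0) {
		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_MIPS_MSA;
		if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
			return -1;
	}

	return 0;
}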
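A similar sketch, again illustrative only, of reading the FPU and MSA state that kvm_mips_get_reg()/kvm_mips_set_reg() now expose. The KVM_REG_MIPS_FPR_64() and KVM_REG_MIPS_VEC_128() ID macros are assumed to come from the MIPS uapi asm/kvm.h changes that accompany this directory; the 128-bit vector read copies 16 bytes into the buffer at reg.addr, matching the new KVM_REG_SIZE_U128 path above.

#include <asm/kvm.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int read_guest_fp_state(int vcpu_fd, unsigned int idx,
			       uint64_t *dbl, uint8_t vec[16])
{
	struct kvm_one_reg reg;

	/* Doubles: an odd idx fails with -EINVAL while the guest has FR=0 */
	reg.id = KVM_REG_MIPS_FPR_64(idx);
	reg.addr = (uintptr_t)dbl;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	/* 128-bit MSA vector: requires the guest to be in FR=1 mode */
	reg.id = KVM_REG_MIPS_VEC_128(idx);
	reg.addr = (uintptr_t)vec;
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}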
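The KVM_REG_MIPS_FPR_32 cases above rely on the architectural FR=0 register pairing: the odd single $f(2n+1) lives in the upper half of the even double $f(2n). A standalone illustration of that indexing follows; the real code uses the kernel's get_fpr32()/set_fpr32() helpers, which also handle endianness, and a little-endian word layout is assumed here purely for brevity.

#include <stdint.h>

/* Model fpu->fpr[] as raw 64-bit doubles; little-endian word order assumed */
static uint32_t read_single_fr0(const uint64_t *fpr, unsigned int idx)
{
	uint64_t even = fpr[idx & ~1u];		/* always index the even double */

	return (idx & 1) ? (uint32_t)(even >> 32)	/* odd single: upper word */
			 : (uint32_t)even;		/* even single: lower word */
}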
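Both the Config5 MTC0 emulation in emulate.c and the KVM_REG_MIPS_CP0_CONFIG* cases in kvm_trap_emul_set_one_reg() use the same masking idiom to let only selected bits change. A minimal restatement of that idiom as a standalone helper (illustrative, not from the patch):

#include <stdint.h>

/*
 * Merge a requested value into the current register value, allowing only
 * the bits in wrmask to change: (cur ^ val) & wrmask picks out the
 * writable bits that differ, and cur ^ change flips exactly those bits.
 */
static inline uint32_t apply_wrmask(uint32_t cur, uint32_t val, uint32_t wrmask)
{
	uint32_t change = (cur ^ val) & wrmask;

	return cur ^ change;
}

For example, when userland writes Config1, only the FP presence bit can actually change, because kvm_mips_config1_wrmask() returns at most MIPS_CONF1_FP.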