Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/00README.txt    |  31
-rw-r--r--  arch/mips/kvm/Kconfig         |  16
-rw-r--r--  arch/mips/kvm/Makefile        |   5
-rw-r--r--  arch/mips/kvm/emulate.c       | 580
-rw-r--r--  arch/mips/kvm/entry.c         |  19
-rw-r--r--  arch/mips/kvm/interrupt.c     |  93
-rw-r--r--  arch/mips/kvm/interrupt.h     |  14
-rw-r--r--  arch/mips/kvm/loongson_ipi.c  | 214
-rw-r--r--  arch/mips/kvm/mips.c          | 138
-rw-r--r--  arch/mips/kvm/mmu.c           |  67
-rw-r--r--  arch/mips/kvm/tlb.c           |  46
-rw-r--r--  arch/mips/kvm/trap_emul.c     | 119
-rw-r--r--  arch/mips/kvm/vz.c            | 280
13 files changed, 1188 insertions, 434 deletions
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
deleted file mode 100644
index 51617e481aa3..000000000000
--- a/arch/mips/kvm/00README.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-KVM/MIPS Trap & Emulate Release Notes
-=====================================
-
-(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
- Malta Board with FPGA based 34K
- Sigma Designs TangoX board with a 24K based 8654 SoC.
- Malta Board with 74K @ 1GHz
-
-(2) Both Guest kernel and Guest Userspace execute in UM.
- Guest User address space: 0x00000000 -> 0x40000000
- Guest Kernel Unmapped: 0x40000000 -> 0x60000000
- Guest Kernel Mapped: 0x60000000 -> 0x80000000
-
- Guest Usermode virtual memory is limited to 1GB.
-
-(2) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
- Note that due to cache aliasing issues, 4K page sizes are NOT supported.
-
-(3) No HugeTLB Support
- Both the host kernel and Guest kernel should have the page size set to 16K.
- This will be implemented in a future release.
-
-(4) KVM/MIPS does not have support for SMP Guests
- Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
- LL/TLBP/SC. Since the TLBP instruction causes a trap the reservation gets cleared
- when we ERET back to the guest. This causes the guest to hang in an infinite loop.
- This will be fixed in a future release.
-
-(5) Use Host FPU
- Currently KVM/MIPS emulates a 24K CPU without a FPU.
- This will be fixed in a future release
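The removed notes above describe the trap-and-emulate guest layout: guest user space in the low 1GB, followed by an unmapped and then a TLB-mapped guest kernel segment. As a quick illustration (plain user-space C, not kernel code; the enum and helper names are invented here), the split can be written as:

#include <stdint.h>

/* Segment boundaries taken from the deleted release notes above. */
#define GUEST_KSEG_UNMAPPED	0x40000000u	/* end of 1GB guest user space  */
#define GUEST_KSEG_MAPPED	0x60000000u	/* start of mapped guest kernel */
#define GUEST_TOP		0x80000000u

enum guest_seg { GUEST_USER, GUEST_KUNMAPPED, GUEST_KMAPPED, GUEST_BAD };

/* Hypothetical classifier for a guest virtual address. */
static enum guest_seg classify_gva(uint32_t gva)
{
	if (gva < GUEST_KSEG_UNMAPPED)
		return GUEST_USER;
	if (gva < GUEST_KSEG_MAPPED)
		return GUEST_KUNMAPPED;
	if (gva < GUEST_TOP)
		return GUEST_KMAPPED;
	return GUEST_BAD;
}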
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index b91d145aa2d5..032b3fca6cbb 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -6,7 +6,7 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
- ---help---
+ help
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
@@ -22,11 +22,12 @@ config KVM
select EXPORT_UASM
select PREEMPT_NOTIFIERS
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select HAVE_KVM_EVENTFD
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
select MMU_NOTIFIER
select SRCU
- ---help---
+ help
Support for hosting Guest kernels.
choice
@@ -36,16 +37,17 @@ choice
config KVM_MIPS_TE
bool "Trap & Emulate"
- ---help---
+ depends on CPU_MIPS32_R2
+ help
Use trap and emulate to virtualize 32-bit guests in user mode. This
does not require any special hardware Virtualization support beyond
- standard MIPS32/64 r2 or later, but it does require the guest kernel
+ standard MIPS32 r2 or later, but it does require the guest kernel
to be configured with CONFIG_KVM_GUEST=y so that it resides in the
user address segment.
config KVM_MIPS_VZ
bool "MIPS Virtualization (VZ) ASE"
- ---help---
+ help
Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
but requires hardware support.
@@ -56,7 +58,7 @@ config KVM_MIPS_DYN_TRANS
bool "KVM/MIPS: Dynamic binary translation to reduce traps"
depends on KVM_MIPS_TE
default y
- ---help---
+ help
When running in Trap & Emulate mode patch privileged
instructions to reduce the number of traps.
@@ -65,7 +67,7 @@ config KVM_MIPS_DYN_TRANS
config KVM_MIPS_DEBUG_COP0_COUNTERS
bool "Maintain counters for COP0 accesses"
depends on KVM
- ---help---
+ help
Maintain statistics for Guest COP0 accesses.
A histogram of COP0 accesses is printed when the VM is
shutdown.
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 01affc1d21c5..506c4ac0ba1c 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for KVM support for MIPS
#
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
@@ -13,6 +13,9 @@ kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
fpu.o
kvm-objs += hypcall.o
kvm-objs += mmu.o
+ifdef CONFIG_CPU_LOONGSON64
+kvm-objs += loongson_ipi.o
+endif
ifdef CONFIG_KVM_MIPS_VZ
kvm-objs += vz.o
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 754094b40a75..d70c4f8e14e2 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -64,7 +64,7 @@ static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
switch (insn.r_format.func) {
case jalr_op:
arch->gprs[insn.r_format.rd] = epc + 8;
- /* Fall through */
+ fallthrough;
case jr_op:
nextpc = arch->gprs[insn.r_format.rs];
break;
@@ -140,7 +140,7 @@ static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
/* These are unconditional and in j_format. */
case jal_op:
arch->gprs[31] = instpc + 8;
- /* fall through */
+ fallthrough;
case j_op:
epc += 4;
epc >>= 28;
@@ -1262,7 +1262,6 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -1597,12 +1596,14 @@ dont_update_pc:
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ int r;
enum emulation_result er;
u32 rt;
+ struct kvm_run *run = vcpu->run;
void *data = run->mmio.data;
+ unsigned int imme;
unsigned long curr_pc;
/*
@@ -1660,15 +1661,231 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
vcpu->arch.gprs[rt], *(u8 *)data);
break;
+ case swl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
+ (vcpu->arch.gprs[rt] >> 24);
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
+ (vcpu->arch.gprs[rt] >> 16);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
+ (vcpu->arch.gprs[rt] >> 8);
+ break;
+ case 3:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case swr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case sdl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
+ ((vcpu->arch.gprs[rt] >> 56) & 0xff);
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
+ ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
+ ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
+ ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
+ ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
+ ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
+ ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
+ break;
+ case 7:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+
+ case sdr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
+ (vcpu->arch.gprs[rt] << 32);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
+ (vcpu->arch.gprs[rt] << 40);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
+ (vcpu->arch.gprs[rt] << 48);
+ break;
+ case 7:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
+ (vcpu->arch.gprs[rt] << 56);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+#endif
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case sdc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden sdc2 instructions.
+ * opcode1 instruction
+ * 0x0 gssbx: store 1 bytes from GPR
+ * 0x1 gsshx: store 2 bytes from GPR
+ * 0x2 gsswx: store 4 bytes from GPR
+ * 0x3 gssdx: store 8 bytes from GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ *(u8 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u8 *)data);
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ *(u16 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u16 *)data);
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ *(u32 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ *(u64 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+ default:
+ kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
default:
kvm_err("Store not yet supported (inst=0x%08x)\n",
inst.word);
goto out_fail;
}
- run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
+ run->mmio.is_write = 1;
vcpu->mmio_is_write = 1;
+
+ r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, data);
+
+ if (!r) {
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
out_fail:
@@ -1678,12 +1895,14 @@ out_fail:
}
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
- u32 cause, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+ u32 cause, struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
+ int r;
enum emulation_result er;
unsigned long curr_pc;
u32 op, rt;
+ unsigned int imme;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
@@ -1716,7 +1935,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
case lwu_op:
vcpu->mmio_needed = 1; /* unsigned */
- /* fall through */
+ fallthrough;
#endif
case lw_op:
run->mmio.len = 4;
@@ -1724,18 +1943,176 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
case lhu_op:
vcpu->mmio_needed = 1; /* unsigned */
- /* fall through */
+ fallthrough;
case lh_op:
run->mmio.len = 2;
break;
case lbu_op:
vcpu->mmio_needed = 1; /* unsigned */
- /* fall through */
+ fallthrough;
case lb_op:
run->mmio.len = 1;
break;
+ case lwl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 3; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 4; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 5; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 6; /* 4 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case lwr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 7; /* 4 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 8; /* 3 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 9; /* 2 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 10; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case ldl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 11; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 12; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 13; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 14; /* 4 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 15; /* 5 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 16; /* 6 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 17; /* 7 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 18; /* 8 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case ldr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 19; /* 8 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 20; /* 7 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 21; /* 6 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 22; /* 5 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 23; /* 4 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 24; /* 3 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 25; /* 2 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 26; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+#endif
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case ldc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden ldc2 instructions.
+ * opcode1 instruction
+ * 0x0 gslbx: store 1 bytes from GPR
+ * 0x1 gslhx: store 2 bytes from GPR
+ * 0x2 gslwx: store 4 bytes from GPR
+ * 0x3 gsldx: store 8 bytes from GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ vcpu->mmio_needed = 27; /* signed */
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ vcpu->mmio_needed = 28; /* signed */
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ vcpu->mmio_needed = 29; /* signed */
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ vcpu->mmio_needed = 30; /* signed */
+ break;
+ default:
+ kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
+
default:
kvm_err("Load not yet supported (inst=0x%08x)\n",
inst.word);
@@ -1745,6 +2122,16 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
+
+ r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, run->mmio.data);
+
+ if (!r) {
+ kvm_mips_complete_mmio_load(vcpu);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
}
@@ -1752,7 +2139,6 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
unsigned long curr_pc,
unsigned long addr,
- struct kvm_run *run,
struct kvm_vcpu *vcpu,
u32 cause)
{
@@ -1780,23 +2166,22 @@ static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
/* no matching guest TLB */
vcpu->arch.host_cp0_badvaddr = addr;
vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
+ kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
return EMULATE_EXCEPT;
case KVM_MIPS_TLBINV:
/* invalid matching guest TLB */
vcpu->arch.host_cp0_badvaddr = addr;
vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
+ kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
return EMULATE_EXCEPT;
default:
break;
- };
+ }
}
}
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
@@ -1886,7 +2271,7 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
* guest's behalf.
*/
er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, run, vcpu, cause);
+ curr_pc, va, vcpu, cause);
if (er != EMULATE_DONE)
goto done;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -1899,11 +2284,11 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
} else if (op_inst == Hit_Invalidate_I) {
/* Perform the icache synchronisation on the guest's behalf */
er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, run, vcpu, cause);
+ curr_pc, va, vcpu, cause);
if (er != EMULATE_DONE)
goto done;
er = kvm_mips_guest_cache_op(protected_flush_icache_line,
- curr_pc, va, run, vcpu, cause);
+ curr_pc, va, vcpu, cause);
if (er != EMULATE_DONE)
goto done;
@@ -1929,7 +2314,6 @@ done:
}
enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
union mips_instruction inst;
@@ -1945,14 +2329,14 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
switch (inst.r_format.opcode) {
case cop0_op:
- er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+ er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
++vcpu->stat.cache_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+ er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
break;
#else
case spec3_op:
@@ -1960,12 +2344,12 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
case cache6_op:
++vcpu->stat.cache_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause, run,
+ er = kvm_mips_emulate_cache(inst, opc, cause,
vcpu);
break;
default:
goto unknown;
- };
+ }
break;
unknown:
#endif
@@ -2000,7 +2384,6 @@ long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
enum emulation_result kvm_mips_emulate_syscall(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2035,7 +2418,6 @@ enum emulation_result kvm_mips_emulate_syscall(u32 cause,
enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2079,7 +2461,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2121,7 +2502,6 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2163,7 +2543,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2204,7 +2583,6 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2244,7 +2622,6 @@ enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2273,7 +2650,6 @@ enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2308,7 +2684,6 @@ enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2343,7 +2718,6 @@ enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2378,7 +2752,6 @@ enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2413,7 +2786,6 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2448,7 +2820,6 @@ enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2482,7 +2853,6 @@ enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
}
enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2571,12 +2941,12 @@ emulate_ri:
* branch target), and pass the RI exception to the guest OS.
*/
vcpu->arch.pc = curr_pc;
- return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
}
-enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
@@ -2591,28 +2961,125 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
switch (run->mmio.len) {
case 8:
- *gpr = *(s64 *)run->mmio.data;
+ switch (vcpu->mmio_needed) {
+ case 11:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xff) << 56);
+ break;
+ case 12:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffff) << 48);
+ break;
+ case 13:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffff) << 40);
+ break;
+ case 14:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
+ break;
+ case 15:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
+ break;
+ case 16:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
+ break;
+ case 17:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
+ break;
+ case 18:
+ case 19:
+ *gpr = *(s64 *)run->mmio.data;
+ break;
+ case 20:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
+ break;
+ case 21:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
+ break;
+ case 22:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
+ break;
+ case 23:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
+ ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
+ break;
+ case 24:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
+ ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
+ break;
+ case 25:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
+ ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
+ break;
+ case 26:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
+ ((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
+ break;
+ default:
+ *gpr = *(s64 *)run->mmio.data;
+ }
break;
case 4:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s32 *)run->mmio.data;
- else
+ switch (vcpu->mmio_needed) {
+ case 1:
*gpr = *(u32 *)run->mmio.data;
+ break;
+ case 2:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 3:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s32 *)run->mmio.data) & 0xff) << 24);
+ break;
+ case 4:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s32 *)run->mmio.data) & 0xffff) << 16);
+ break;
+ case 5:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s32 *)run->mmio.data) & 0xffffff) << 8);
+ break;
+ case 6:
+ case 7:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 8:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
+ ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
+ break;
+ case 9:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
+ ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
+ break;
+ case 10:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
+ ((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
+ break;
+ default:
+ *gpr = *(s32 *)run->mmio.data;
+ }
break;
case 2:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s16 *) run->mmio.data;
- else
+ if (vcpu->mmio_needed == 1)
*gpr = *(u16 *)run->mmio.data;
+ else
+ *gpr = *(s16 *)run->mmio.data;
break;
case 1:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s8 *) run->mmio.data;
+ if (vcpu->mmio_needed == 1)
+ *gpr = *(u8 *)run->mmio.data;
else
- *gpr = *(u8 *) run->mmio.data;
+ *gpr = *(s8 *)run->mmio.data;
break;
}
@@ -2622,7 +3089,6 @@ done:
static enum emulation_result kvm_mips_emulate_exc(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2660,7 +3126,6 @@ static enum emulation_result kvm_mips_emulate_exc(u32 cause,
enum emulation_result kvm_mips_check_privilege(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
@@ -2742,7 +3207,7 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
}
if (er == EMULATE_PRIV_FAIL)
- kvm_mips_emulate_exc(cause, opc, run, vcpu);
+ kvm_mips_emulate_exc(cause, opc, vcpu);
return er;
}
@@ -2756,7 +3221,6 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
*/
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
u32 *opc,
- struct kvm_run *run,
struct kvm_vcpu *vcpu,
bool write_fault)
{
@@ -2780,9 +3244,9 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
KVM_ENTRYHI_ASID));
if (index < 0) {
if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
} else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
} else {
kvm_err("%s: invalid exc code: %d\n", __func__,
exccode);
@@ -2797,10 +3261,10 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
*/
if (!TLB_IS_VALID(*tlb, va)) {
if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+ er = kvm_mips_emulate_tlbinv_ld(cause, opc,
vcpu);
} else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+ er = kvm_mips_emulate_tlbinv_st(cause, opc,
vcpu);
} else {
kvm_err("%s: invalid exc code: %d\n", __func__,
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..fd716942e302 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -56,6 +56,7 @@
#define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1
#define C0_BADINSTRP 8, 2
+#define C0_PGD 9, 7
#define C0_ENTRYHI 10, 0
#define C0_GUESTCTL1 10, 4
#define C0_STATUS 12, 0
@@ -307,7 +308,10 @@ static void *kvm_mips_build_enter_guest(void *addr)
#ifdef CONFIG_KVM_MIPS_VZ
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
- UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+ if (cpu_has_ldpte)
+ UASM_i_MFC0(&p, K0, C0_PWBASE);
+ else
+ UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
/*
@@ -469,8 +473,10 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
u32 *p = addr;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
+#endif
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -490,6 +496,16 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
*/
preempt_disable();
+#ifdef CONFIG_CPU_LOONGSON64
+ UASM_i_MFC0(&p, K1, C0_PGD);
+ uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+#endif
+ uasm_i_ldpte(&p, K1, 0); /* even */
+ uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_tlbwr(&p);
+#else
/*
* Now for the actual refill bit. A lot of this can be common with the
* Linux TLB refill handler, however we don't need to handle so many
@@ -512,6 +528,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
build_get_ptep(&p, K0, K1);
build_update_entries(&p, K0, K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
preempt_enable();
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index 7257e8b6f5a9..d28c2c9c343e 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -61,27 +61,8 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
* the EXC code will be set when we are actually
* delivering the interrupt:
*/
- switch (intr) {
- case 2:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- /* Queue up an INT exception for the core */
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));
+ kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -89,26 +70,8 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
{
int intr = (int)irq->irq;
- switch (intr) {
- case -2:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));
+ kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
/* Deliver the interrupt of the corresponding priority, if possible. */
@@ -116,50 +79,20 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
int allowed = 0;
- u32 exccode;
+ u32 exccode, ie;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0;
- switch (priority) {
- case MIPS_EXC_INT_TIMER:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IO:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_1:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_2:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
+ if (priority == MIPS_EXC_MAX)
+ return 0;
- default:
- break;
+ ie = 1 << (kvm_priority_to_irq[priority] + 8);
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & ie)) {
+ allowed = 1;
+ exccode = EXCCODE_INT;
}
/* Are we allowed to deliver the interrupt ??? */
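The rewritten queue/dequeue callbacks no longer enumerate each interrupt line: since Cause.IP2..IP7 sit at bits 10..15, the pending bit for an ioctl irq number is simply 1 << (irq + 8), and the matching priority comes from the new kvm_irq_to_priority() lookup. A minimal sketch of the bit computation (illustrative only):

#include <stdint.h>

/* Cause.IP2..IP7 occupy bits 10..15, so irq numbers 2..7 select bit (irq + 8). */
static uint32_t cause_ip_bit(unsigned int irq)
{
	return 1u << (irq + 8);
}

/* cause_ip_bit(2) == 0x0400 (C_IRQ0/IP2), cause_ip_bit(7) == 0x8000 (C_IRQ5/IP7). */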
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 3bf0a49725e8..c3e878ca3e07 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -21,11 +21,12 @@
#define MIPS_EXC_NMI 5
#define MIPS_EXC_MCHK 6
#define MIPS_EXC_INT_TIMER 7
-#define MIPS_EXC_INT_IO 8
-#define MIPS_EXC_EXECUTE 9
-#define MIPS_EXC_INT_IPI_1 10
-#define MIPS_EXC_INT_IPI_2 11
-#define MIPS_EXC_MAX 12
+#define MIPS_EXC_INT_IO_1 8
+#define MIPS_EXC_INT_IO_2 9
+#define MIPS_EXC_EXECUTE 10
+#define MIPS_EXC_INT_IPI_1 11
+#define MIPS_EXC_INT_IPI_2 12
+#define MIPS_EXC_MAX 13
/* XXXSL More to follow */
#define C_TI (_ULCAST_(1) << 30)
@@ -38,6 +39,9 @@
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
#endif
+extern u32 *kvm_priority_to_irq;
+u32 kvm_irq_to_priority(u32 irq);
+
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
new file mode 100644
index 000000000000..3681fc8fba38
--- /dev/null
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson-3 Virtual IPI interrupt support.
+ *
+ * Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved.
+ *
+ * Authors: Chen Zhu <zhuchen@loongson.cn>
+ * Authors: Huacai Chen <chenhc@lemote.com>
+ */
+
+#include <linux/kvm_host.h>
+
+#define IPI_BASE 0x3ff01000ULL
+
+#define CORE0_STATUS_OFF 0x000
+#define CORE0_EN_OFF 0x004
+#define CORE0_SET_OFF 0x008
+#define CORE0_CLEAR_OFF 0x00c
+#define CORE0_BUF_20 0x020
+#define CORE0_BUF_28 0x028
+#define CORE0_BUF_30 0x030
+#define CORE0_BUF_38 0x038
+
+#define CORE1_STATUS_OFF 0x100
+#define CORE1_EN_OFF 0x104
+#define CORE1_SET_OFF 0x108
+#define CORE1_CLEAR_OFF 0x10c
+#define CORE1_BUF_20 0x120
+#define CORE1_BUF_28 0x128
+#define CORE1_BUF_30 0x130
+#define CORE1_BUF_38 0x138
+
+#define CORE2_STATUS_OFF 0x200
+#define CORE2_EN_OFF 0x204
+#define CORE2_SET_OFF 0x208
+#define CORE2_CLEAR_OFF 0x20c
+#define CORE2_BUF_20 0x220
+#define CORE2_BUF_28 0x228
+#define CORE2_BUF_30 0x230
+#define CORE2_BUF_38 0x238
+
+#define CORE3_STATUS_OFF 0x300
+#define CORE3_EN_OFF 0x304
+#define CORE3_SET_OFF 0x308
+#define CORE3_CLEAR_OFF 0x30c
+#define CORE3_BUF_20 0x320
+#define CORE3_BUF_28 0x328
+#define CORE3_BUF_30 0x330
+#define CORE3_BUF_38 0x338
+
+static int loongson_vipi_read(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t offset = addr & 0xff;
+ void *pbuf;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ *(uint64_t *)val = s->status;
+ break;
+
+ case CORE0_EN_OFF:
+ *(uint64_t *)val = s->en;
+ break;
+
+ case CORE0_SET_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_CLEAR_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)val = *(uint64_t *)pbuf;
+ else /* Assume len == 4 */
+ *(uint32_t *)val = *(uint32_t *)pbuf;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, const void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t data, offset = addr & 0xff;
+ void *pbuf;
+ struct kvm *kvm = ipi->kvm;
+ struct kvm_mips_interrupt irq;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ data = *(uint64_t *)val;
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ break;
+
+ case CORE0_EN_OFF:
+ s->en = data;
+ break;
+
+ case CORE0_SET_OFF:
+ s->status |= data;
+ irq.cpu = id;
+ irq.irq = 6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ break;
+
+ case CORE0_CLEAR_OFF:
+ s->status &= ~data;
+ if (!s->status) {
+ irq.cpu = id;
+ irq.irq = -6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ }
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)pbuf = (uint64_t)data;
+ else /* Assume len == 4 */
+ *(uint32_t *)pbuf = (uint32_t)data;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_read(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_write(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static const struct kvm_io_device_ops kvm_ipi_ops = {
+ .read = kvm_ipi_read,
+ .write = kvm_ipi_write,
+};
+
+void kvm_init_loongson_ipi(struct kvm *kvm)
+{
+ int i;
+ unsigned long addr;
+ struct loongson_kvm_ipi *s;
+ struct kvm_io_device *device;
+
+ s = &kvm->arch.ipi;
+ s->kvm = kvm;
+ spin_lock_init(&s->lock);
+
+ /*
+ * Initialize IPI device
+ */
+ for (i = 0; i < 4; i++) {
+ device = &s->dev_ipi[i].device;
+ kvm_iodevice_init(device, &kvm_ipi_ops);
+ addr = (((unsigned long)i) << 44) + IPI_BASE;
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device);
+ mutex_unlock(&kvm->slots_lock);
+ s->dev_ipi[i].ipi = s;
+ s->dev_ipi[i].node_id = i;
+ }
+}
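The new in-kernel IPI model registers a 0x400-byte mailbox window per node at guest physical 0x3ff01000, with the node number carried in address bits 44-45 and the target core in bits 8-9; because the device sits on KVM_MMIO_BUS, the kvm_io_bus fast path added in emulate.c reaches it without a round trip to userspace. A sketch of the address decode performed by loongson_vipi_read()/loongson_vipi_write() (stand-alone C, helper name invented here):

#include <stdint.h>

#define IPI_BASE 0x3ff01000ULL

/* Map a mailbox GPA to the target vcpu id and the register offset. */
static void ipi_decode(uint64_t addr, uint32_t *vcpu_id, uint64_t *offset)
{
	uint32_t core = (addr >> 8) & 3;
	uint32_t node = (addr >> 44) & 3;

	*vcpu_id = core + node * 4;
	*offset  = addr & 0xff;
}

/* e.g. node 1, core 2, "set" register: addr = (1ULL << 44) + IPI_BASE + 0x208 -> vcpu 6, offset 0x8 */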
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 8f05dd0a0f4e..0c50ac444222 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -19,13 +19,13 @@
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <linux/kvm_host.h>
@@ -39,40 +39,44 @@
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
- { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
- { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
- { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
- { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
- { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
- { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
- { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
- { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
- { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
- { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
- { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
- { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
- { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
- { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
- { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
- { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
- { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
- { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+ VCPU_STAT("wait", wait_exits),
+ VCPU_STAT("cache", cache_exits),
+ VCPU_STAT("signal", signal_exits),
+ VCPU_STAT("interrupt", int_exits),
+ VCPU_STAT("cop_unusable", cop_unusable_exits),
+ VCPU_STAT("tlbmod", tlbmod_exits),
+ VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
+ VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
+ VCPU_STAT("addrerr_st", addrerr_st_exits),
+ VCPU_STAT("addrerr_ld", addrerr_ld_exits),
+ VCPU_STAT("syscall", syscall_exits),
+ VCPU_STAT("resvd_inst", resvd_inst_exits),
+ VCPU_STAT("break_inst", break_inst_exits),
+ VCPU_STAT("trap_inst", trap_inst_exits),
+ VCPU_STAT("msa_fpe", msa_fpe_exits),
+ VCPU_STAT("fpe", fpe_exits),
+ VCPU_STAT("msa_disabled", msa_disabled_exits),
+ VCPU_STAT("flush_dcache", flush_dcache_exits),
#ifdef CONFIG_KVM_MIPS_VZ
- { "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
- { "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
- { "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
- { "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
- { "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
- { "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
- { "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
- { "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
+ VCPU_STAT("vz_gpsi", vz_gpsi_exits),
+ VCPU_STAT("vz_gsfc", vz_gsfc_exits),
+ VCPU_STAT("vz_hc", vz_hc_exits),
+ VCPU_STAT("vz_grr", vz_grr_exits),
+ VCPU_STAT("vz_gva", vz_gva_exits),
+ VCPU_STAT("vz_ghfc", vz_ghfc_exits),
+ VCPU_STAT("vz_gpa", vz_gpa_exits),
+ VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
+ VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
#endif
- { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
- { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
- { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
- { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
+#endif
+ VCPU_STAT("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
{NULL}
};
@@ -80,13 +84,13 @@ bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void)
{
- kvm_trace_guest_mode_change = 1;
+ kvm_trace_guest_mode_change = true;
return 0;
}
void kvm_guest_mode_change_trace_unreg(void)
{
- kvm_trace_guest_mode_change = 0;
+ kvm_trace_guest_mode_change = false;
}
/*
@@ -128,9 +132,13 @@ int kvm_arch_check_processor_compat(void *opaque)
return 0;
}
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
@@ -147,6 +155,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.gpa_mm.pgd)
return -ENOMEM;
+#ifdef CONFIG_CPU_LOONGSON64
+ kvm_init_loongson_ipi(kvm);
+#endif
+
return 0;
}
@@ -284,8 +296,7 @@ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
kvm_mips_callbacks->queue_timer_int(vcpu);
vcpu->arch.wait = 0;
- if (swq_has_sleeper(&vcpu->wq))
- swake_up_one(&vcpu->wq);
+ rcuwait_wake_up(&vcpu->wait);
return kvm_mips_count_timeout(vcpu);
}
@@ -439,7 +450,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
@@ -449,11 +460,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
- kvm_mips_complete_mmio_load(vcpu, run);
+ kvm_mips_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
}
- if (run->immediate_exit)
+ if (vcpu->run->immediate_exit)
goto out;
lose_fpu(1);
@@ -470,7 +481,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(run, vcpu);
+ r = kvm_mips_callbacks->vcpu_run(vcpu);
trace_kvm_out(vcpu);
guest_exit_irqoff();
@@ -489,7 +500,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
int intr = (int)irq->irq;
struct kvm_vcpu *dvcpu = NULL;
- if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+ if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
+ intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
(int)intr);
@@ -498,10 +512,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
else
dvcpu = vcpu->kvm->vcpus[irq->cpu];
- if (intr == 2 || intr == 3 || intr == 4) {
+ if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
- } else if (intr == -2 || intr == -3 || intr == -4) {
+ } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
} else {
kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
@@ -511,8 +525,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
dvcpu->arch.wait = 0;
- if (swq_has_sleeper(&dvcpu->wq))
- swake_up_one(&dvcpu->wq);
+ rcuwait_wake_up(&dvcpu->wait);
return 0;
}
@@ -1224,7 +1237,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
* end up causing an exception to be delivered to the Guest
* Kernel
*/
- er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+ er = kvm_mips_check_privilege(cause, opc, vcpu);
if (er == EMULATE_PRIV_FAIL) {
goto skip_emul;
} else if (er == EMULATE_FAIL) {
@@ -1373,7 +1386,7 @@ skip_emul:
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- kvm_mips_callbacks->vcpu_reenter(run, vcpu);
+ kvm_mips_callbacks->vcpu_reenter(vcpu);
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
@@ -1620,6 +1633,34 @@ static struct notifier_block kvm_mips_csr_die_notifier = {
.notifier_call = kvm_mips_csr_die_notify,
};
+static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IO_2] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ4,
+};
+
+u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
+
+u32 kvm_irq_to_priority(u32 irq)
+{
+ int i;
+
+ for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
+ if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
+ return i;
+ }
+
+ return MIPS_EXC_MAX;
+}
+
static int __init kvm_mips_init(void)
{
int ret;
@@ -1638,6 +1679,9 @@ static int __init kvm_mips_init(void)
if (ret)
return ret;
+ if (boot_cpu_type() == CPU_LOONGSON64)
+ kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
+
register_die_notifier(&kvm_mips_csr_die_notifier);
return 0;
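The per-platform priority tables make the irq numbers accepted by KVM_INTERRUPT platform dependent. As a worked example (assuming the usual asm/mipsregs.h values, where C_IRQn is Cause bit n + 10): the Loongson IPI device above raises irq 6, 1 << (6 + 8) equals 1 << 14 which is C_IRQ4, and that is exactly kvm_loongson3_priority_to_irq[MIPS_EXC_INT_IPI_1], so kvm_irq_to_priority(6) resolves to MIPS_EXC_INT_IPI_1. The arithmetic can be checked directly:

#include <assert.h>

/* Assumed values from asm/mipsregs.h: C_IRQn == 1 << (n + 10). */
#define C_IRQ0 (1u << 10)
#define C_IRQ4 (1u << 14)

int main(void)
{
	assert((1u << (6 + 8)) == C_IRQ4);	/* Loongson IPI line -> MIPS_EXC_INT_IPI_1 */
	assert((1u << (2 + 8)) == C_IRQ0);	/* default I/O line  -> MIPS_EXC_INT_IO_1  */
	return 0;
}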
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7dad7a293eae..28c366d307e7 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -25,41 +25,9 @@
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- int min, int max)
-{
- void *page;
-
- BUG_ON(max > KVM_NR_MEM_OBJS);
- if (cache->nobjs >= min)
- return 0;
- while (cache->nobjs < max) {
- page = (void *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- cache->objects[cache->nobjs++] = page;
- }
- return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
- while (mc->nobjs)
- free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
- void *p;
-
- BUG_ON(!mc || !mc->nobjs);
- p = mc->objects[--mc->nobjs];
- return p;
-}
-
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
- mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
/**
@@ -153,7 +121,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
if (!cache)
return NULL;
- new_pmd = mmu_memory_cache_alloc(cache);
+ new_pmd = kvm_mmu_memory_cache_alloc(cache);
pmd_init((unsigned long)new_pmd,
(unsigned long)invalid_pte_table);
pud_populate(NULL, pud, new_pmd);
@@ -164,11 +132,11 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
if (!cache)
return NULL;
- new_pte = mmu_memory_cache_alloc(cache);
+ new_pte = kvm_mmu_memory_cache_alloc(cache);
clear_page(new_pte);
pmd_populate_kernel(NULL, pmd, new_pte);
}
- return pte_offset(pmd, addr);
+ return pte_offset_kernel(pmd, addr);
}
/* Caller must hold kvm->mm_lock */
@@ -187,8 +155,8 @@ static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
unsigned long end_gpa)
{
- int i_min = __pte_offset(start_gpa);
- int i_max = __pte_offset(end_gpa);
+ int i_min = pte_index(start_gpa);
+ int i_max = pte_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
@@ -215,7 +183,7 @@ static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
if (!pmd_present(pmd[i]))
continue;
- pte = pte_offset(pmd + i, 0);
+ pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gpa;
@@ -312,8 +280,8 @@ static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
- int i_min = __pte_offset(start); \
- int i_max = __pte_offset(end); \
+ int i_min = pte_index(start); \
+ int i_max = pte_index(end); \
int i; \
pte_t old, new; \
\
@@ -346,7 +314,7 @@ static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
if (!pmd_present(pmd[i])) \
continue; \
\
- pte = pte_offset(pmd + i, 0); \
+ pte = pte_offset_kernel(pmd + i, 0); \
if (i == i_max) \
cur_end = end; \
\
@@ -518,7 +486,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+ unsigned flags)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
@@ -711,8 +680,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
goto out;
/* We need a minimum of cached pages ready for page table creation */
- err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
if (err)
goto out;
@@ -796,8 +764,7 @@ static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
int ret;
/* We need a minimum of cached pages ready for page table creation */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
if (ret)
return NULL;
@@ -842,8 +809,8 @@ void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
unsigned long end_gva)
{
- int i_min = __pte_offset(start_gva);
- int i_max = __pte_offset(end_gva);
+ int i_min = pte_index(start_gva);
+ int i_max = pte_index(end_gva);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
@@ -877,7 +844,7 @@ static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
if (!pmd_present(pmd[i]))
continue;
- pte = pte_offset(pmd + i, 0);
+ pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gva;
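mmu.c drops its private page-table object cache in favour of the generic kvm_mmu_topup_memory_cache()/kvm_mmu_memory_cache_alloc() helpers, and moves to the generic pte_index()/pte_offset_kernel() accessors. The pattern is the one the removed helpers implemented: fill the cache while allocation may still sleep or fail, then draw from it during the atomic page-table walk. A simplified stand-alone sketch of that pattern (not the kernel API; capacity and object size are placeholders):

#include <stdlib.h>

#define CACHE_CAPACITY 8

struct obj_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Fill the cache up to "min" objects while it is still safe to allocate. */
static int cache_topup(struct obj_cache *mc, int min)
{
	while (mc->nobjs < min && mc->nobjs < CACHE_CAPACITY) {
		void *page = calloc(1, 4096);
		if (!page)
			return -1;
		mc->objects[mc->nobjs++] = page;
	}
	return 0;
}

/* Take a pre-allocated object; callers must have topped up beforehand. */
static void *cache_alloc(struct obj_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}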
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7cd92166a0b9..1c1fbce3f566 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -20,8 +20,8 @@
#include <asm/cpu.h>
#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>
@@ -469,7 +469,7 @@ void kvm_vz_local_flush_guesttlb_all(void)
cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
break;
- };
+ }
/* Invalidate guest entries in guest TLB */
write_gc0_entrylo0(0);
@@ -486,7 +486,7 @@ void kvm_vz_local_flush_guesttlb_all(void)
if (cvmmemctl2) {
cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
write_c0_cvmmemctl2(cvmmemctl2);
- };
+ }
write_gc0_index(old_index);
write_gc0_entryhi(old_entryhi);
@@ -622,6 +622,46 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void)
+{
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ write_gc0_index(0);
+ guest_tlbinvf();
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);
+
+void kvm_loongson_clear_guest_ftlb(void)
+{
+ int i;
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ for (i = current_cpu_data.tlbsizevtlb;
+ i < (current_cpu_data.tlbsizevtlb +
+ current_cpu_data.tlbsizeftlbsets);
+ i++) {
+ write_gc0_index(i);
+ guest_tlbinvf();
+ }
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
+#endif
+
#endif
/**
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 5a11e83dffe6..f8cba51e1054 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -67,7 +67,6 @@ static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
@@ -81,14 +80,14 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
* Unusable/no FPU in guest:
* deliver guest COP1 Unusable Exception
*/
- er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
} else {
/* Restore FPU state */
kvm_own_fpu(vcpu);
er = EMULATE_DONE;
}
} else {
- er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_inst(cause, opc, vcpu);
}
switch (er) {
@@ -97,12 +96,12 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
break;
case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
case EMULATE_WAIT:
- run->exit_reason = KVM_EXIT_INTR;
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
ret = RESUME_HOST;
break;
@@ -116,8 +115,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
return ret;
}
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
enum emulation_result er;
union mips_instruction inst;
@@ -125,7 +123,7 @@ static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
/* A code fetch fault doesn't count as an MMIO */
if (kvm_is_ifetch_fault(&vcpu->arch)) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
@@ -134,23 +132,22 @@ static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Emulate the load */
- er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_load(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Emulate load from MMIO space failed\n");
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
} else {
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
}
return RESUME_HOST;
}
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{
enum emulation_result er;
union mips_instruction inst;
@@ -161,34 +158,33 @@ static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Emulate the store */
- er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_store(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Emulate store to MMIO space failed\n");
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
} else {
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
}
return RESUME_HOST;
}
-static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
+static int kvm_mips_bad_access(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu, bool store)
{
if (store)
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
else
- return kvm_mips_bad_load(cause, opc, run, vcpu);
+ return kvm_mips_bad_load(cause, opc, vcpu);
}
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 cause = vcpu->arch.host_cp0_cause;
@@ -212,12 +208,12 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
* They would indicate stale host TLB entries.
*/
if (unlikely(index < 0)) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
tlb = vcpu->arch.guest_tlb + index;
if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
@@ -226,23 +222,23 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
* exception. Relay that on to the guest so it can handle it.
*/
if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
- kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+ kvm_mips_emulate_tlbmod(cause, opc, vcpu);
return RESUME_GUEST;
}
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
true))
/* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
return RESUME_GUEST;
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
/* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
return RESUME_GUEST;
} else {
/* host kernel addresses are all handled as MMIO */
- return kvm_mips_bad_store(cause, opc, run, vcpu);
+ return kvm_mips_bad_store(cause, opc, vcpu);
}
}
@@ -276,7 +272,7 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
* into the shadow host TLB
*/
- er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
+ er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
@@ -289,14 +285,14 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
* not expect to ever get them
*/
if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
- ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+ ret = kvm_mips_bad_access(cause, opc, vcpu, store);
} else if (KVM_GUEST_KERNEL_MODE(vcpu)
&& (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
/*
* With EVA we may get a TLB exception instead of an address
* error when the guest performs MMIO to KSeg1 addresses.
*/
- ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+ ret = kvm_mips_bad_access(cause, opc, vcpu, store);
} else {
kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
store ? "ST" : "LD", cause, opc, badvaddr);
@@ -320,7 +316,6 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 cause = vcpu->arch.host_cp0_cause;
@@ -328,11 +323,11 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
if (KVM_GUEST_KERNEL_MODE(vcpu)
&& (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
- ret = kvm_mips_bad_store(cause, opc, run, vcpu);
+ ret = kvm_mips_bad_store(cause, opc, vcpu);
} else {
kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -340,18 +335,17 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
u32 cause = vcpu->arch.host_cp0_cause;
int ret = RESUME_GUEST;
if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
- ret = kvm_mips_bad_load(cause, opc, run, vcpu);
+ ret = kvm_mips_bad_load(cause, opc, vcpu);
} else {
kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -359,17 +353,16 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_syscall(cause, opc, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -377,17 +370,16 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+ er = kvm_mips_handle_ri(cause, opc, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -395,17 +387,16 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -413,17 +404,16 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *)vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -431,17 +421,16 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *)vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -449,17 +438,16 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *)vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
@@ -474,7 +462,6 @@ static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_run *run = vcpu->run;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
@@ -486,10 +473,10 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
* No MSA in guest, or FPU enabled and not in FR=1 mode,
* guest reserved instruction exception
*/
- er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
/* MSA disabled by guest, guest MSA disabled exception */
- er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+ er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
} else {
/* Restore MSA/FPU state */
kvm_own_msa(vcpu);
@@ -502,7 +489,7 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
break;
case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
@@ -529,6 +516,9 @@ static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MIPS_TE:
r = 1;
break;
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
default:
r = 0;
break;
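Reporting KVM_CAP_IOEVENTFD lets userspace register ioeventfds, so a guest write to a registered MMIO address signals an eventfd instead of forcing a full KVM_EXIT_MMIO round trip into the VMM. A minimal userspace sketch, assuming an already-created VM file descriptor and an example guest-physical address:

/* Userspace sketch: signal an eventfd on 4-byte guest writes to one
 * MMIO address. vm_fd and gpa are assumptions of the example. */
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int wire_ioeventfd(int vm_fd, __u64 gpa)
{
	struct kvm_ioeventfd ioev;
	int efd = eventfd(0, EFD_NONBLOCK);

	if (efd < 0)
		return -1;
	memset(&ioev, 0, sizeof(ioev));
	ioev.addr  = gpa;	/* guest-physical MMIO address to watch */
	ioev.len   = 4;		/* trigger only on 4-byte accesses */
	ioev.fd    = efd;
	ioev.flags = 0;		/* MMIO (not PIO), no datamatch */
	if (ioctl(vm_fd, KVM_IOEVENTFD, &ioev) < 0)
		return -1;
	return efd;		/* poll/read this fd to observe guest writes */
}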
@@ -594,7 +584,7 @@ static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
pmd_va = pud_va | (k << PMD_SHIFT);
if (pmd_va >= end)
break;
- pte = pte_offset(pmd + k, 0);
+ pte = pte_offset_kernel(pmd + k, 0);
pte_free_kernel(NULL, pte);
}
pmd_free(NULL, pmd);
@@ -1181,8 +1171,7 @@ void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
local_irq_enable();
}
-static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
{
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
@@ -1225,7 +1214,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
check_mmu_context(mm);
}
-static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int r;
@@ -1234,7 +1223,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_mips_deliver_interrupts(vcpu,
kvm_read_c0_guest_cause(vcpu->arch.cop0));
- kvm_trap_emul_vcpu_reenter(run, vcpu);
+ kvm_trap_emul_vcpu_reenter(vcpu);
/*
* We use user accessors to access guest memory, but we don't want to
@@ -1252,7 +1241,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
*/
kvm_mips_suspend_mm(cpu);
- r = vcpu->arch.vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
/* We may have migrated while handling guest exits */
cpu = smp_processor_id();
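The change repeated throughout trap_emul.c above (and in vz.c below) is mechanical: exit handlers and emulation helpers no longer take a struct kvm_run * argument, because the run area is always reachable as vcpu->run; only the low-level vcpu_run entry hook still receives it explicitly. A minimal sketch of the resulting handler shape (the handler name is illustrative):

/* Sketch: after the cleanup, handlers derive the run area from the vcpu. */
static int example_exit_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}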
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index dde20887a70d..c299e5d6d69c 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -29,6 +29,9 @@
#include <linux/kvm_host.h>
#include "interrupt.h"
+#ifdef CONFIG_CPU_LOONGSON64
+#include "loongson_regs.h"
+#endif
#include "trace.h"
@@ -126,6 +129,11 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
return mask;
}
+static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
+}
+
/*
* VZ optionally allows these additional Config bits to be written by root:
* Config: M, [MT]
@@ -180,6 +188,12 @@ static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}
+static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config6_guest_wrmask(vcpu) |
+ LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
+}
+
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
/* VZ guest has already converted gva to gpa */
@@ -225,23 +239,7 @@ static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case 2:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -253,44 +251,22 @@ static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case -2:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
-static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
- [MIPS_EXC_INT_TIMER] = C_IRQ5,
- [MIPS_EXC_INT_IO] = C_IRQ0,
- [MIPS_EXC_INT_IPI_1] = C_IRQ1,
- [MIPS_EXC_INT_IPI_2] = C_IRQ2,
-};
-
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
set_gc0_cause(C_TI);
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
if (cpu_has_guestctl2)
@@ -311,7 +287,7 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
@@ -329,7 +305,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
}
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
@@ -899,7 +876,6 @@ static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -966,7 +942,8 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(sel == 2 || /* SRSCtl */
sel == 3)) || /* SRSMap */
(rd == MIPS_CP0_CONFIG &&
- (sel == 7)) || /* Config7 */
+ (sel == 6 || /* Config6 */
+ sel == 7)) || /* Config7 */
(rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
@@ -974,6 +951,11 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(rd == MIPS_CP0_ERRCTL &&
(sel == 0))) { /* ErrCtl */
val = cop0->reg[rd][sel];
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ val = cop0->reg[rd][sel];
+#endif
} else {
val = 0;
er = EMULATE_FAIL;
@@ -1036,9 +1018,40 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
kvm_write_maari(vcpu, val);
+ } else if (rd == MIPS_CP0_CONFIG &&
+ (sel == 6)) {
+ cop0->reg[rd][sel] = (int)val;
} else if (rd == MIPS_CP0_ERRCTL &&
(sel == 0)) { /* ErrCtl */
/* ignore the written value */
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (val & LOONGSON_DIAG_BTB) {
+ /* Flush BTB */
+ set_c0_diag(LOONGSON_DIAG_BTB);
+ }
+ if (val & LOONGSON_DIAG_ITLB) {
+ /* Flush ITLB */
+ set_c0_diag(LOONGSON_DIAG_ITLB);
+ }
+ if (val & LOONGSON_DIAG_DTLB) {
+ /* Flush DTLB */
+ set_c0_diag(LOONGSON_DIAG_DTLB);
+ }
+ if (val & LOONGSON_DIAG_VTLB) {
+ /* Flush VTLB */
+ kvm_loongson_clear_guest_vtlb();
+ }
+ if (val & LOONGSON_DIAG_FTLB) {
+ /* Flush FTLB */
+ kvm_loongson_clear_guest_ftlb();
+ }
+ local_irq_restore(flags);
+#endif
} else {
er = EMULATE_FAIL;
}
@@ -1062,7 +1075,6 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
u32 *opc, u32 cause,
- struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
@@ -1118,7 +1130,7 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
break;
default:
break;
- };
+ }
kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
@@ -1129,12 +1141,81 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
return EMULATE_FAIL;
}
+#ifdef CONFIG_CPU_LOONGSON64
+static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned int rs, rd;
+ unsigned int hostcfg;
+ unsigned long curr_pc;
+ enum emulation_result er = EMULATE_DONE;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rs = inst.loongson3_lscsr_format.rs;
+ rd = inst.loongson3_lscsr_format.rd;
+ switch (inst.loongson3_lscsr_format.fr) {
+ case 0x8: /* Read CPUCFG */
+ ++vcpu->stat.vz_cpucfg_exits;
+ hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
+
+ switch (vcpu->arch.gprs[rs]) {
+ case LOONGSON_CFG0:
+ vcpu->arch.gprs[rd] = 0x14c000;
+ break;
+ case LOONGSON_CFG1:
+ hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
+ LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
+ LOONGSON_CFG1_SFBP);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG2:
+ hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
+ LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG3:
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ default:
+ /* Don't export any other advanced features to guest */
+ vcpu->arch.gprs[rd] = 0;
+ break;
+ }
+ break;
+
+ default:
+ kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
+ inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL) {
+ kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
+ curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
+
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+#endif
+
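kvm_vz_gpsi_lwc2() above emulates Loongson-3 CPUCFG reads that trap as guest privileged-sensitive instructions: the CPUCFG select comes from GPR rs, the result is written to GPR rd, and only a whitelisted subset of the host's CPUCFG bits is exposed so the guest cannot observe features KVM does not virtualize. A distilled sketch of that policy (guest_cpucfg is illustrative; the LOONGSON_CFG* constants come from loongson_regs.h):

/* Sketch: guest-visible CPUCFG value, distilled from the switch above. */
static u32 guest_cpucfg(u32 sel, u32 hostcfg)
{
	switch (sel) {
	case LOONGSON_CFG0:
		return 0x14c000;	/* fixed value advertised to the guest */
	case LOONGSON_CFG1:
		return hostcfg & (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
				  LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
				  LOONGSON_CFG1_SFBP);
	case LOONGSON_CFG2:
		return hostcfg & (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
				  LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
	case LOONGSON_CFG3:
		return hostcfg;
	default:
		return 0;		/* hide all other configuration words */
	}
}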
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
- struct kvm_run *run = vcpu->run;
union mips_instruction inst;
int rd, rt, sel;
int err;
@@ -1150,12 +1231,17 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
switch (inst.r_format.opcode) {
case cop0_op:
- er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
+ break;
+#endif
+#ifdef CONFIG_CPU_LOONGSON64
+ case lwc2_op:
+ er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
break;
#endif
case spec3_op:
@@ -1163,7 +1249,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
#ifdef CONFIG_CPU_MIPSR6
case cache6_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
break;
#endif
case rdhwr_op:
@@ -1183,7 +1269,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), 0);
goto unknown;
- };
+ }
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
@@ -1192,7 +1278,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
break;
default:
goto unknown;
- };
+ }
break;
unknown:
@@ -1465,7 +1551,6 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_FAIL;
int ret = RESUME_GUEST;
@@ -1493,7 +1578,7 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
break;
case EMULATE_FAIL:
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
@@ -1512,8 +1597,6 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
-
/*
* If MSA not present or not exposed to guest or FR=0, the MSA operation
* should have been treated as a reserved instruction!
@@ -1524,7 +1607,7 @@ static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
(read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
!(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
@@ -1560,7 +1643,7 @@ static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
}
/* Treat as MMIO */
- er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_load(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
@@ -1607,7 +1690,7 @@ static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
}
/* Treat as MMIO */
- er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ er = kvm_mips_emulate_store(inst, cause, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
@@ -1652,6 +1735,7 @@ static u64 kvm_vz_get_one_regs[] = {
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG4,
KVM_REG_MIPS_CP0_CONFIG5,
+ KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXT,
#endif
@@ -1706,7 +1790,7 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
- if (cpu_guest_has_htw)
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
@@ -1755,7 +1839,7 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
}
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
sizeof(kvm_vz_get_one_regs_htw)))
return -EFAULT;
@@ -1878,17 +1962,17 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = read_gc0_segctl2();
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwbase();
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwfield();
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwsize();
break;
@@ -1896,7 +1980,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = (long)read_gc0_wired();
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwctl();
break;
@@ -1946,7 +2030,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
default:
*v = (long)kvm_read_c0_guest_prid(cop0);
break;
- };
+ }
break;
case KVM_REG_MIPS_CP0_EBASE:
*v = kvm_vz_read_gc0_ebase();
@@ -1979,6 +2063,9 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
return -EINVAL;
*v = read_gc0_config5();
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ *v = kvm_read_sw_gc0_config6(cop0);
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
@@ -2101,17 +2188,17 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_segctl2(v);
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwbase(v);
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwfield(v);
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwsize(v);
break;
@@ -2119,7 +2206,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
change_gc0_wired(MIPSR6_WIRED_WIRED, v);
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwctl(v);
break;
@@ -2185,7 +2272,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
default:
kvm_write_c0_guest_prid(cop0, v);
break;
- };
+ }
break;
case KVM_REG_MIPS_CP0_EBASE:
kvm_vz_write_gc0_ebase(v);
@@ -2248,6 +2335,14 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_config5(v);
}
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ cur = kvm_read_sw_gc0_config6(cop0);
+ change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_sw_gc0_config6(cop0, (int)v);
+ }
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
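The KVM_REG_MIPS_CP0_CONFIG6 case above uses the same masked-update idiom as the other Config registers: XOR the current and requested values to find the requested bit flips, keep only the bits userspace may change per kvm_vz_config6_user_wrmask() (the interrupt-timer bits plus SFBEN and FTLBDIS), and fold them back in. The idiom in isolation:

/* Sketch: apply only the writable bits of a requested register update. */
static u32 masked_update(u32 cur, u32 requested, u32 wrmask)
{
	u32 change = (cur ^ requested) & wrmask;

	return cur ^ change;	/* unwritable bits keep their current value */
}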
@@ -2580,7 +2675,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
/* restore HTW registers */
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
kvm_restore_gc0_pwbase(cop0);
kvm_restore_gc0_pwfield(cop0);
kvm_restore_gc0_pwsize(cop0);
@@ -2597,7 +2692,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* prevents a SC on the next VCPU from succeeding by matching a LL on
* the previous VCPU.
*/
- if (cpu_guest_has_rw_llb)
+ if (vcpu->kvm->created_vcpus > 1)
write_gc0_lladdr(0);
return 0;
@@ -2685,8 +2780,8 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
}
/* save HTW registers if enabled in guest */
- if (cpu_guest_has_htw &&
- kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
+ if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
+ kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
kvm_save_gc0_pwbase(cop0);
kvm_save_gc0_pwfield(cop0);
kvm_save_gc0_pwsize(cop0);
@@ -2853,8 +2948,12 @@ static int kvm_vz_hardware_enable(void)
write_c0_guestctl0(MIPS_GCTL0_CP0 |
(MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
MIPS_GCTL0_CG | MIPS_GCTL0_CF);
- if (cpu_has_guestctl0ext)
- set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ if (cpu_has_guestctl0ext) {
+ if (current_cpu_type() != CPU_LOONGSON64)
+ set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ else
+ clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ }
if (cpu_has_guestid) {
write_c0_guestctl1(0);
@@ -2871,6 +2970,12 @@ static int kvm_vz_hardware_enable(void)
if (cpu_has_guestctl2)
clear_c0_guestctl2(0x3f << 10);
+#ifdef CONFIG_CPU_LOONGSON64
+ /* Control guest CCA attribute */
+ if (cpu_has_csr())
+ csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
+#endif
+
return 0;
}
@@ -2927,6 +3032,9 @@ static int kvm_vz_check_extension(struct kvm *kvm, long ext)
r = 2;
break;
#endif
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
default:
r = 0;
break;
@@ -2980,7 +3088,7 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
*/
/* PageGrain */
- if (cpu_has_mips_r6)
+ if (cpu_has_mips_r5 || cpu_has_mips_r6)
kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
/* Wired */
if (cpu_has_mips_r6)
@@ -2988,7 +3096,7 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
read_gc0_wired() & MIPSR6_WIRED_LIMIT);
/* Status */
kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
- if (cpu_has_mips_r6)
+ if (cpu_has_mips_r5 || cpu_has_mips_r6)
kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
/* IntCtl */
kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
@@ -3086,7 +3194,7 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
}
/* reset HTW registers */
- if (cpu_guest_has_htw && cpu_has_mips_r6) {
+ if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
/* PWField */
kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
/* PWSize */
@@ -3129,7 +3237,7 @@ static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
kvm_vz_flush_shadow_all(kvm);
}
-static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int preserve_guest_tlb;
@@ -3145,7 +3253,7 @@ static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_vz_vcpu_load_wired(vcpu);
}
-static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
int r;
@@ -3158,7 +3266,7 @@ static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu);
- r = vcpu->arch.vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
kvm_vz_vcpu_save_wired(vcpu);