author    Radim Krčmář <rkrcmar@redhat.com>  2016-09-29 16:26:52 +0200
committer Radim Krčmář <rkrcmar@redhat.com>  2016-09-29 16:26:52 +0200
commit    d9ab710b85310e4ba9295f2b494eda54cf1a355a (patch)
tree      439f5b76e1e85e792d6913056c7edf6923ba1b76
parent    Merge branch 'kvm-ppc-next' of git://git.kernel.org/pub/scm/linux/kernel/git/... (diff)
parent    KVM: MIPS: Drop dubious EntryHi optimisation (diff)
Merge tag 'kvm_mips_4.9_1' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/kvm-mips into next
MIPS KVM updates for v4.9
- A couple of fixes in preparation for supporting MIPS EVA host kernels.
- MIPS SMP host & TLB invalidation fixes.
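
For background on the first point: with EVA, the user and kernel virtual address ranges overlap, so a guest user HVA can legitimately be >= PAGE_OFFSET, and the generic PAGE_OFFSET-based KVM_HVA_ERR_BAD no longer works. The kvm_host.h hunk below therefore moves the error markers to the very top of the address range and tests them IS_ERR_VALUE-style. The stand-alone C sketch here is illustrative only; the PAGE_OFFSET value and the MAX_ERRNO-based approximation of IS_ERR_VALUE() are assumptions, not part of this patch.

/*
 * Illustrative sketch: a PAGE_OFFSET-based "bad HVA" sentinel would collide
 * with valid EVA user addresses, while top-of-range error values do not.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO		4095UL		/* assumed, as in the kernel */
#define KVM_HVA_ERR_BAD		(-1UL)		/* as added in kvm_host.h */
#define KVM_HVA_ERR_RO_BAD	(-2UL)

/* Approximation of the kernel's IS_ERR_VALUE() used by kvm_is_error_hva() */
static bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= -MAX_ERRNO;
}

int main(void)
{
	/* Hypothetical EVA user mapping above an assumed 0x80000000 PAGE_OFFSET */
	unsigned long eva_user_hva = 0x80001000UL;

	/* A PAGE_OFFSET threshold would wrongly flag this valid address... */
	assert(eva_user_hva >= 0x80000000UL);
	/* ...while the top-of-range markers are still detected as errors */
	assert(!kvm_is_error_hva(eva_user_hva));
	assert(kvm_is_error_hva(KVM_HVA_ERR_BAD));
	assert(kvm_is_error_hva(KVM_HVA_ERR_RO_BAD));

	printf("EVA user HVA accepted, error markers still detected\n");
	return 0;
}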
Diffstat
 arch/mips/include/asm/kvm_host.h | 17
 arch/mips/kvm/emulate.c          | 78
 arch/mips/kvm/mips.c             | 30
 arch/mips/kvm/mmu.c              | 16
 arch/mips/kvm/trap_emul.c        | 18
 5 files changed, 143 insertions(+), 16 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 5f488dc8a7d5..07f58cfc1ab9 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -107,6 +107,20 @@
 #define KVM_INVALID_INST		0xdeadbeef
 #define KVM_INVALID_ADDR		0xdeadbeef
 
+/*
+ * EVA has overlapping user & kernel address spaces, so user VAs may be >
+ * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
+ * PAGE_OFFSET.
+ */
+
+#define KVM_HVA_ERR_BAD			(-1UL)
+#define KVM_HVA_ERR_RO_BAD		(-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+	return IS_ERR_VALUE(addr);
+}
+
 extern atomic_t kvm_mips_instance;
 
 struct kvm_vm_stat {
@@ -314,6 +328,9 @@ struct kvm_vcpu_arch {
 	u32 guest_kernel_asid[NR_CPUS];
 	struct mm_struct guest_kernel_mm, guest_user_mm;
 
+	/* Guest ASID of last user mode execution */
+	unsigned int last_user_gasid;
+
 	int last_sched_cpu;
 
 	/* WAIT executed */
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index e788515f766b..4db4c0370859 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -846,6 +846,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 	return EMULATE_FAIL;
 }
 
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu:	VCPU with changed mappings.
+ * @tlb:	TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+					  struct kvm_mips_tlb *tlb)
+{
+	int cpu, i;
+	bool user;
+
+	/* No need to flush for entries which are already invalid */
+	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+		return;
+	/* User address space doesn't need flushing for KSeg2/3 changes */
+	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+	preempt_disable();
+
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
+	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	/* Invalidate the whole ASID on other CPUs */
+	cpu = smp_processor_id();
+	for_each_possible_cpu(i) {
+		if (i == cpu)
+			continue;
+		if (user)
+			vcpu->arch.guest_user_asid[i] = 0;
+		vcpu->arch.guest_kernel_asid[i] = 0;
+	}
+
+	preempt_enable();
+}
+
 /* Write Guest TLB Entry @ Index */
 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 {
@@ -865,11 +906,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
-	/*
-	 * Probe the shadow host TLB for the entry being overwritten, if one
-	 * matches, invalidate it
-	 */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -898,11 +936,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
 	tlb = &vcpu->arch.guest_tlb[index];
 
-	/*
-	 * Probe the shadow host TLB for the entry being overwritten, if one
-	 * matches, invalidate it
-	 */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -1026,6 +1060,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 	enum emulation_result er = EMULATE_DONE;
 	u32 rt, rd, sel;
 	unsigned long curr_pc;
+	int cpu, i;
 
 	/*
 	 * Update PC and hold onto current PC in case there is
@@ -1127,16 +1162,31 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 				u32 nasid =
 					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
 
-				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
-				    ((kvm_read_c0_guest_entryhi(cop0) &
+				if (((kvm_read_c0_guest_entryhi(cop0) &
 				      KVM_ENTRYHI_ASID) != nasid)) {
 					trace_kvm_asid_change(vcpu,
 						kvm_read_c0_guest_entryhi(cop0)
 							& KVM_ENTRYHI_ASID,
 						nasid);
 
-					/* Blow away the shadow host TLBs */
-					kvm_mips_flush_host_tlb(1);
+					/*
+					 * Regenerate/invalidate kernel MMU
+					 * context.
+					 * The user MMU context will be
+					 * regenerated lazily on re-entry to
+					 * guest user if the guest ASID actually
+					 * changes.
+					 */
+					preempt_disable();
+					cpu = smp_processor_id();
+					kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
+								cpu, vcpu);
+					vcpu->arch.guest_kernel_asid[cpu] =
+						vcpu->arch.guest_kernel_mm.context.asid[cpu];
+					for_each_possible_cpu(i)
+						if (i != cpu)
+							vcpu->arch.guest_kernel_asid[i] = 0;
+					preempt_enable();
 				}
 
 				kvm_write_c0_guest_entryhi(cop0,
 						vcpu->arch.gprs[rt]);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 49b25e74d0c7..ce961495b5e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -421,6 +421,31 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }
 
+/* Must be called with preemption disabled, just before entering guest */
+static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int cpu = smp_processor_id();
+	unsigned int gasid;
+
+	/*
+	 * Lazy host ASID regeneration for guest user mode.
+	 * If the guest ASID has changed since the last guest usermode
+	 * execution, regenerate the host ASID so as to invalidate stale TLB
+	 * entries.
+	 */
+	if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
+		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+		if (gasid != vcpu->arch.last_user_gasid) {
+			kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
+						vcpu);
+			vcpu->arch.guest_user_asid[cpu] =
+				vcpu->arch.guest_user_mm.context.asid[cpu];
+			vcpu->arch.last_user_gasid = gasid;
+		}
+	}
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r = 0;
@@ -448,6 +473,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	htw_stop();
 
 	trace_kvm_enter(vcpu);
+
+	kvm_mips_check_asids(vcpu);
+
 	r = vcpu->arch.vcpu_run(run, vcpu);
 	trace_kvm_out(vcpu);
 
@@ -1561,6 +1589,8 @@ skip_emul:
 	if (ret == RESUME_GUEST) {
 		trace_kvm_reenter(vcpu);
 
+		kvm_mips_check_asids(vcpu);
+
 		/*
 		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
 		 * is live), restore FCR31 / MSACSR.
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 121008c0fcc9..03883ba806e2 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -250,15 +250,27 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 			vcpu->arch.guest_kernel_mm.context.asid[cpu];
+		newasid++;
+
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+	}
+
+	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+						asid_version_mask(cpu)) {
+		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+				KVM_ENTRYHI_ASID;
+
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
+		vcpu->arch.last_user_gasid = gasid;
 		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
 		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
 			  cpu, vcpu->arch.guest_user_asid[cpu]);
 	}
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 091553942bcb..3a5484f9aa50 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -175,6 +175,24 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		}
+	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
+		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+		/*
+		 * With EVA we may get a TLB exception instead of an address
+		 * error when the guest performs MMIO to KSeg1 addresses.
+		 */
+		kvm_debug("Emulate %s MMIO space\n",
+			  store ? "Store to" : "Load from");
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		if (er == EMULATE_FAIL) {
+			kvm_err("Emulate %s MMIO space failed\n",
+				store ? "Store to" : "Load from");
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		} else {
+			run->exit_reason = KVM_EXIT_MMIO;
+			ret = RESUME_HOST;
+		}
 	} else {
 		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
 			store ? "ST" : "LD", cause, opc, badvaddr);
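
The second point is the lazy ASID handling visible in kvm_mips_check_asids() and kvm_mips_invalidate_guest_tlb() above: instead of flushing the whole host TLB whenever the guest writes a new ASID to EntryHi, the host ASID backing guest user mappings is regenerated only when the guest ASID has actually changed since the last guest user-mode execution, and stale per-CPU ASIDs on other CPUs are simply zeroed so they are regenerated on the next vcpu load. The following stand-alone model is illustrative only; the struct, the asid_alloc() counter and the printed output are simplified stand-ins, not kernel code.

/*
 * Illustrative model of lazy user-ASID regeneration: allocate a new host
 * ASID only when the guest ASID differs from the one used last time.
 */
#include <stdio.h>

struct vcpu_model {
	unsigned int guest_user_asid;	/* host ASID backing guest user mappings */
	unsigned int last_user_gasid;	/* guest ASID seen at last user-mode entry */
};

static unsigned int next_host_asid = 1;

static unsigned int asid_alloc(void)
{
	return next_host_asid++;	/* stand-in for kvm_get_new_mmu_context() */
}

/* Called just before entering the guest in user mode */
static void check_asids(struct vcpu_model *vcpu, unsigned int gasid)
{
	if (gasid != vcpu->last_user_gasid) {
		vcpu->guest_user_asid = asid_alloc();
		vcpu->last_user_gasid = gasid;
		printf("guest ASID %u changed: new host ASID %u\n",
		       gasid, vcpu->guest_user_asid);
	} else {
		printf("guest ASID %u unchanged: reuse host ASID %u\n",
		       gasid, vcpu->guest_user_asid);
	}
}

int main(void)
{
	struct vcpu_model vcpu = { 0, 0 };

	check_asids(&vcpu, 5);	/* first user-mode entry: allocate */
	check_asids(&vcpu, 5);	/* same guest ASID: no flush, no allocation */
	check_asids(&vcpu, 9);	/* guest switched process: regenerate */
	return 0;
}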