author		James Hogan <james.hogan@imgtec.com>	2016-10-12 00:14:39 +0200
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 16:20:45 +0100
commit		c550d53934d821dbdd867ca314d417f2e918c72c (patch)
tree		54fd7a9dce7d5ae71c9720458a833f66d1619c99 /arch/mips/kvm/trap_emul.c
parent		KVM: MIPS/MMU: Move preempt/ASID handling to implementation (diff)
KVM: MIPS: Remove duplicated ASIDs from vcpu
The kvm_vcpu_arch structure contains mm_structs for allocating MMU
contexts (primarily the ASID), but it also copies the resulting ASIDs
into guest_{user,kernel}_asid[] arrays, which are referenced from
uasm-generated code.
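
For context, the duplication described above lives in struct kvm_vcpu_arch
(arch/mips/include/asm/kvm_host.h). The sketch below is a paraphrase of the
relevant fields only; the exact types, ordering and surrounding members are
approximate, not a verbatim quote of the header:

/* Paraphrased sketch of the duplicated state in struct kvm_vcpu_arch;
 * field types and ordering are approximate. */
struct kvm_vcpu_arch {
	/* ... */
	/* Guest kernel/user mm_structs; their context holds the per-CPU ASIDs */
	struct mm_struct guest_kernel_mm;
	struct mm_struct guest_user_mm;
	/* Cached copies of those ASIDs, also referenced from uasm-generated code */
	u32 guest_kernel_asid[NR_CPUS];
	u32 guest_user_asid[NR_CPUS];
	/* ... */
};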
This duplication doesn't seem to serve any purpose, and it gets in the
way of generalising the ASID handling across guest kernel/user modes, so
let's just extract the ASID straight out of the mm_struct on demand; in
fact there are convenient cpu_context() and cpu_asid() macros for
doing so.
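
For reference, on MIPS those macros (arch/mips/include/asm/mmu_context.h)
boil down to roughly the following, so reading the ASID on demand is just an
array lookup plus a mask; this is a paraphrase rather than a verbatim quote
of the header:

/* Approximate definitions from arch/mips/include/asm/mmu_context.h:
 * the per-CPU ASID lives directly in the mm_struct's context, and
 * cpu_asid() masks off the version bits. */
#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm) \
	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))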
To reduce the verbosity of this code, we also add kern_mm and user_mm
local variables where the kernel and user mm_structs are used.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Diffstat (limited to 'arch/mips/kvm/trap_emul.c')
-rw-r--r--	arch/mips/kvm/trap_emul.c | 29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 494a90221b5e..c7854d32fd64 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -635,32 +635,29 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 
 static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
 
 	/* Allocate new kernel and user ASIDs if needed */
 
-	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+	if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
-		vcpu->arch.guest_kernel_asid[cpu] =
-			vcpu->arch.guest_kernel_mm.context.asid[cpu];
+		kvm_get_new_mmu_context(kern_mm, cpu, vcpu);
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#lx\n",
+			  cpu, cpu_context(cpu, kern_mm));
 	}
 
-	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+	if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
-		vcpu->arch.guest_user_asid[cpu] =
-			vcpu->arch.guest_user_mm.context.asid[cpu];
+		kvm_get_new_mmu_context(user_mm, cpu, vcpu);
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-			  vcpu->arch.guest_user_asid[cpu]);
+		kvm_debug("[%d]: Allocated new ASID for Guest User: %#lx\n",
+			  cpu, cpu_context(cpu, user_mm));
 	}
 
 	/*
@@ -670,11 +667,9 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	 */
 	if (current->flags & PF_VCPU) {
 		if (KVM_GUEST_KERNEL_MODE(vcpu))
-			write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
-					 asid_mask);
+			write_c0_entryhi(cpu_asid(cpu, kern_mm));
 		else
-			write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
-					 asid_mask);
+			write_c0_entryhi(cpu_asid(cpu, user_mm));
 		ehb();
 	}