author		Anup Patel <apatel@ventanamicro.com>	2024-10-20 21:47:32 +0200
committer	Anup Patel <anup@brainfault.org>	2024-10-28 12:14:03 +0100
commit		68c72a6557b072bff79658b9c0fdb0e69148e32d (patch)
tree		ea5948f1ff14a011f397766621575914b011ec54 /arch
parent		RISC-V: KVM: Use nacl_csr_xyz() for accessing AIA CSRs (diff)
RISC-V: KVM: Use SBI sync SRET call when available
Implement an optimized KVM world-switch using the SBI sync SRET call
when the SBI nested acceleration extension is available. This improves
the KVM world-switch when KVM RISC-V is running as a guest under some
other hypervisor.
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-12-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
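For orientation, the following is a condensed C sketch (not the literal patch, which appears in full below) of the branch this change introduces in kvm_riscv_vcpu_enter_exit(): when the sync SRET feature is detected, guest state is staged through the NACL shared memory and the guest is resumed via the SBI SYNC_SRET call; otherwise the existing direct world-switch path is kept. The CSR save/restore details are elided here.

/*
 * Condensed sketch of the selection logic added by this patch;
 * the hstatus swap and NACL shared-memory staging steps are
 * omitted for brevity (see the diff below for the real code).
 */
static void noinstr kvm_riscv_vcpu_enter_exit_sketch(struct kvm_vcpu *vcpu)
{
	if (kvm_riscv_nacl_sync_sret_available())
		/* Fast path: resume the guest with the SBI sync SRET call */
		__kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
					   SBI_EXT_NACL_SYNC_SRET);
	else
		/* Fallback: existing direct world-switch */
		__kvm_riscv_switch_to(&vcpu->arch);
}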
Diffstat (limited to 'arch')
-rw-r--r--	arch/riscv/include/asm/kvm_nacl.h	6
-rw-r--r--	arch/riscv/kvm/vcpu.c			48
-rw-r--r--	arch/riscv/kvm/vcpu_switch.S		29
3 files changed, 79 insertions, 4 deletions
diff --git a/arch/riscv/include/asm/kvm_nacl.h b/arch/riscv/include/asm/kvm_nacl.h
index 8f3e3ebf5017..4124d5e06a0f 100644
--- a/arch/riscv/include/asm/kvm_nacl.h
+++ b/arch/riscv/include/asm/kvm_nacl.h
@@ -12,6 +12,8 @@
 #include <asm/csr.h>
 #include <asm/sbi.h>
 
+struct kvm_vcpu_arch;
+
 DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
 #define kvm_riscv_nacl_available() \
 	static_branch_unlikely(&kvm_riscv_nacl_available)
@@ -43,6 +45,10 @@ void __kvm_riscv_nacl_hfence(void *shmem,
 			     unsigned long page_num,
 			     unsigned long page_count);
 
+void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch,
+				unsigned long sbi_ext_id,
+				unsigned long sbi_func_id);
+
 int kvm_riscv_nacl_enable(void);
 
 void kvm_riscv_nacl_disable(void);
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 1fb2ac9f34cc..113eb8957472 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -766,19 +766,59 @@ static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *v
  */
 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
+	void *nsh;
 	struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
 	struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;
 
 	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
 	guest_state_enter_irqoff();
 
-	hcntx->hstatus = ncsr_swap(CSR_HSTATUS, gcntx->hstatus);
+	if (kvm_riscv_nacl_sync_sret_available()) {
+		nsh = nacl_shmem();
 
-	nsync_csr(-1UL);
+		if (kvm_riscv_nacl_autoswap_csr_available()) {
+			hcntx->hstatus =
+				nacl_csr_read(nsh, CSR_HSTATUS);
+			nacl_scratch_write_long(nsh,
+						SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+						SBI_NACL_SHMEM_AUTOSWAP_HSTATUS,
+						gcntx->hstatus);
+			nacl_scratch_write_long(nsh,
+						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+						SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS);
+		} else if (kvm_riscv_nacl_sync_csr_available()) {
+			hcntx->hstatus = nacl_csr_swap(nsh,
+						       CSR_HSTATUS, gcntx->hstatus);
+		} else {
+			hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
+		}
 
-	__kvm_riscv_switch_to(&vcpu->arch);
+		nacl_scratch_write_longs(nsh,
+					 SBI_NACL_SHMEM_SRET_OFFSET +
+					 SBI_NACL_SHMEM_SRET_X(1),
+					 &gcntx->ra,
+					 SBI_NACL_SHMEM_SRET_X_LAST);
+
+		__kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
+					   SBI_EXT_NACL_SYNC_SRET);
+
+		if (kvm_riscv_nacl_autoswap_csr_available()) {
+			nacl_scratch_write_long(nsh,
+						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
+						0);
+			gcntx->hstatus = nacl_scratch_read_long(nsh,
+								SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
+								SBI_NACL_SHMEM_AUTOSWAP_HSTATUS);
+		} else {
+			gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+		}
+	} else {
+		hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
 
-	gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+		__kvm_riscv_switch_to(&vcpu->arch);
+
+		gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
+	}
 
 	vcpu->arch.last_exit_cpu = vcpu->cpu;
 	guest_state_exit_irqoff();
diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S
index 9f13e5ce6a18..47686bcb21e0 100644
--- a/arch/riscv/kvm/vcpu_switch.S
+++ b/arch/riscv/kvm/vcpu_switch.S
@@ -218,6 +218,35 @@ SYM_FUNC_START(__kvm_riscv_switch_to)
 	ret
 SYM_FUNC_END(__kvm_riscv_switch_to)
 
+/*
+ * Parameters:
+ * A0 <= Pointer to struct kvm_vcpu_arch
+ * A1 <= SBI extension ID
+ * A2 <= SBI function ID
+ */
+SYM_FUNC_START(__kvm_riscv_nacl_switch_to)
+	SAVE_HOST_GPRS
+
+	SAVE_HOST_AND_RESTORE_GUEST_CSRS	.Lkvm_nacl_switch_return
+
+	/* Resume Guest using SBI nested acceleration */
+	add	a6, a2, zero
+	add	a7, a1, zero
+	ecall
+
+	/* Back to Host */
+	.align 2
+.Lkvm_nacl_switch_return:
+	SAVE_GUEST_GPRS
+
+	SAVE_GUEST_AND_RESTORE_HOST_CSRS
+
+	RESTORE_HOST_GPRS
+
+	/* Return to C code */
+	ret
+SYM_FUNC_END(__kvm_riscv_nacl_switch_to)
+
 SYM_CODE_START(__kvm_riscv_unpriv_trap)
 	/*
 	 * We assume that faulting unpriv load/store instruction is
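The ecall sequence in __kvm_riscv_nacl_switch_to() follows the standard SBI calling convention: a7 carries the extension ID and a6 the function ID, which is why the assembly moves a1 into a7 and a2 into a6 before trapping into the SBI implementation. As a point of reference only, a minimal standalone C sketch of such a call is shown below; the kernel itself uses its own sbi_ecall() helper rather than this hypothetical wrapper, and error handling of the returned SBI error code is omitted.

/*
 * Illustrative sketch of an SBI call from C (GCC/Clang RISC-V
 * inline asm).  Per the SBI spec, a7 = extension ID, a6 = function
 * ID; the error code comes back in a0 and a value in a1.
 */
static inline long sbi_call_no_args(unsigned long ext_id,
				    unsigned long func_id)
{
	register unsigned long a6 asm("a6") = func_id;
	register unsigned long a7 asm("a7") = ext_id;
	register long a0 asm("a0") = 0;

	asm volatile("ecall"
		     : "+r" (a0)
		     : "r" (a6), "r" (a7)
		     : "memory", "a1");

	return a0;	/* SBI error code (0 on success) */
}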