Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/include/asm/esr.h       |  1
-rw-r--r-- | arch/arm64/include/asm/kvm_host.h  |  2
-rw-r--r-- | arch/arm64/kvm/arm.c               | 14
-rw-r--r-- | arch/arm64/kvm/guest.c             |  7
-rw-r--r-- | arch/arm64/kvm/hyp/hyp-entry.S     |  2
-rw-r--r-- | arch/arm64/kvm/hyp/nvhe/host.S     |  2
-rw-r--r-- | arch/arm64/kvm/hyp/nvhe/setup.c    | 14
-rw-r--r-- | arch/arm64/kvm/hyp/nvhe/sys_regs.c |  2
8 files changed, 27 insertions, 17 deletions
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index a305ce256090..d52a0b269ee8 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -68,6 +68,7 @@
 #define ESR_ELx_EC_MAX		(0x3F)
 
 #define ESR_ELx_EC_SHIFT	(26)
+#define ESR_ELx_EC_WIDTH	(6)
 #define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
 #define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4be8486042a7..2a5f7f38006f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -584,7 +584,7 @@ struct kvm_vcpu_stat {
 	u64 exits;
 };
 
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
+void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f5490afe1ebf..e4727dc771bf 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -223,7 +223,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = 1;
 		break;
 	case KVM_CAP_NR_VCPUS:
-		r = num_online_cpus();
+		/*
+		 * ARM64 treats KVM_CAP_NR_CPUS differently from all other
+		 * architectures, as it does not always bound it to
+		 * KVM_CAP_MAX_VCPUS. It should not matter much because
+		 * this is just an advisory value.
+		 */
+		r = min_t(unsigned int, num_online_cpus(),
+			  kvm_arm_default_max_vcpus());
 		break;
 	case KVM_CAP_MAX_VCPUS:
 	case KVM_CAP_MAX_VCPU_ID:
@@ -1389,12 +1396,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
 	}
 	case KVM_ARM_PREFERRED_TARGET: {
-		int err;
 		struct kvm_vcpu_init init;
 
-		err = kvm_vcpu_preferred_target(&init);
-		if (err)
-			return err;
+		kvm_vcpu_preferred_target(&init);
 
 		if (copy_to_user(argp, &init, sizeof(init)))
 			return -EFAULT;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 5ce26bedf23c..e116c7767730 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -869,13 +869,10 @@ u32 __attribute_const__ kvm_target_cpu(void)
 	return KVM_ARM_TARGET_GENERIC_V8;
 }
 
-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
 	u32 target = kvm_target_cpu();
 
-	if (target < 0)
-		return -ENODEV;
-
 	memset(init, 0, sizeof(*init));
 
 	/*
@@ -885,8 +882,6 @@ int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 	 * target type.
 	 */
 	init->target = (__u32)target;
-
-	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 9aa9b73475c9..b6b6801d96d5 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -44,7 +44,7 @@
 el1_sync:				// Guest trapped into EL2
 
 	mrs	x0, esr_el2
-	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
 	cmp	x0, #ESR_ELx_EC_HVC64
 	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 0c6116d34e18..3d613e721a75 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -141,7 +141,7 @@ SYM_FUNC_END(__host_hvc)
 .L__vect_start\@:
 	stp	x0, x1, [sp, #-16]!
 	mrs	x0, esr_el2
-	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
 	cmp	x0, #ESR_ELx_EC_HVC64
 	b.eq	__host_hvc
 	b	__host_exit
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 862c7b514e20..578f71798c2e 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -178,7 +178,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
 
 	phys = kvm_pte_to_phys(pte);
 	if (!addr_is_memory(phys))
-		return 0;
+		return -EINVAL;
 
 	/*
 	 * Adjust the host stage-2 mappings to match the ownership attributes
@@ -207,8 +207,18 @@ static int finalize_host_mappings(void)
 		.cb	= finalize_host_mappings_walker,
 		.flags	= KVM_PGTABLE_WALK_LEAF,
 	};
+	int i, ret;
+
+	for (i = 0; i < hyp_memblock_nr; i++) {
+		struct memblock_region *reg = &hyp_memory[i];
+		u64 start = (u64)hyp_phys_to_virt(reg->base);
+
+		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
+		if (ret)
+			return ret;
+	}
 
-	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits), &walker);
+	return 0;
 }
 
 void __noreturn __pkvm_init_finalise(void)
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 3787ee6fb1a2..792cf6e6ac92 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -474,7 +474,7 @@ bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return true;
 }
 
-/**
+/*
  * Handler for protected VM restricted exceptions.
  *
  * Inject an undefined exception into the guest and return true to indicate that
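
The lsr-to-ubfx change in hyp-entry.S and host.S is subtle: ESR_EL2 is a 64-bit register that can carry state above bit 31, and a plain right shift by ESR_ELx_EC_SHIFT leaves those upper bits in the result, whereas a 6-bit field extract returns only the EC field, matching the ESR_ELx_EC() macro. A minimal user-space C sketch of the two computations, using a hypothetical ESR value (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_WIDTH	6
#define ESR_ELx_EC_MASK		(0x3FULL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

int main(void)
{
	/* Hypothetical ESR value: EC = 0x16 (HVC64), plus a stray bit above bit 31. */
	uint64_t esr = (1ULL << 32) | (0x16ULL << ESR_ELx_EC_SHIFT);

	/* What the old "lsr x0, x0, #ESR_ELx_EC_SHIFT" computed: upper bits leak in. */
	uint64_t by_lsr = esr >> ESR_ELx_EC_SHIFT;

	/* What "ubfx x0, x0, #SHIFT, #WIDTH" computes: only the 6-bit EC field. */
	uint64_t by_ubfx = (esr >> ESR_ELx_EC_SHIFT) &
			   ((1ULL << ESR_ELx_EC_WIDTH) - 1);

	/* Prints lsr: 0x56, ubfx: 0x16, macro: 0x16. */
	printf("lsr: %#llx  ubfx: %#llx  macro: %#llx\n",
	       (unsigned long long)by_lsr,
	       (unsigned long long)by_ubfx,
	       (unsigned long long)ESR_ELx_EC(esr));
	return 0;
}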