Diffstat (limited to 'arch/loongarch/include/asm')
-rw-r--r--  arch/loongarch/include/asm/Kbuild            |  2
-rw-r--r--  arch/loongarch/include/asm/kvm_csr.h         |  6
-rw-r--r--  arch/loongarch/include/asm/kvm_host.h        | 37
-rw-r--r--  arch/loongarch/include/asm/kvm_para.h        | 12
-rw-r--r--  arch/loongarch/include/asm/kvm_vcpu.h        | 11
-rw-r--r--  arch/loongarch/include/asm/loongarch.h       | 11
-rw-r--r--  arch/loongarch/include/asm/mmzone.h          | 16
-rw-r--r--  arch/loongarch/include/asm/paravirt.h        |  7
-rw-r--r--  arch/loongarch/include/asm/qspinlock.h       | 41
-rw-r--r--  arch/loongarch/include/asm/topology.h        |  1
-rw-r--r--  arch/loongarch/include/asm/vdso/getrandom.h  | 39
-rw-r--r--  arch/loongarch/include/asm/vdso/vdso.h       |  6
-rw-r--r--  arch/loongarch/include/asm/vdso/vsyscall.h   |  8

13 files changed, 169 insertions(+), 28 deletions(-)
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 2bb3676429c0..5b5a6c90e6e2 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -6,8 +6,8 @@ generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
-generic-y += qspinlock.h
generic-y += user.h
generic-y += ioctl.h
+generic-y += mmzone.h
generic-y += statfs.h
generic-y += param.h
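
For context: each generic-y entry makes Kbuild emit a one-line wrapper under include/generated/asm/. Dropping qspinlock.h lets the arch-specific <asm/qspinlock.h> added below take precedence, and the new mmzone.h entry stands in for the arch header deleted below. A sketch of the generated wrapper, assuming the usual scripts/Makefile.asm-generic behavior:

/* include/generated/asm/mmzone.h (sketch of the generated one-liner) */
#include <asm-generic/mmzone.h>
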
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
index 724ca8b7b401..4a76ce796f1f 100644
--- a/arch/loongarch/include/asm/kvm_csr.h
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -30,6 +30,7 @@
: [val] "+r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
+ __v; \
})
#define gcsr_xchg(v, m, csr) \
@@ -181,6 +182,8 @@ __BUILD_GCSR_OP(tlbidx)
#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
+#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid))
+
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
@@ -208,4 +211,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
csr->csrs[gid] |= val & _mask;
}
+#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \
+ CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)
+
#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
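
The lone `__v;` added above turns the statement expression into one that evaluates to whatever the asm left in __v, i.e. the register's previous contents (LoongArch CSR-write instructions return the old value through the same GPR). That is what lets the new kvm_read_clear_hw_gcsr() read and zero a guest CSR in a single call. A minimal userspace sketch of the pattern, with a plain variable standing in for the CSR:

#include <stdio.h>

/* Illustration only: a variable standing in for a hardware CSR. */
static unsigned long fake_csr = 0xdeadbeef;

/* Mirrors the diff: the trailing "__v;" makes the whole statement
 * expression yield the old register contents after the swap-style
 * write. */
#define fake_csr_write(v)				\
({							\
	unsigned long __v = (v);			\
	unsigned long __old = fake_csr;			\
	fake_csr = __v;					\
	__v = __old;					\
	__v;						\
})

int main(void)
{
	/* Read-and-clear in one call, like kvm_read_clear_hw_gcsr(). */
	unsigned long old = fake_csr_write(0);

	printf("old=%#lx now=%#lx\n", old, fake_csr);
	return 0;
}
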
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 5f0677e03817..d6bb72424027 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -30,6 +30,7 @@
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1)
+#define KVM_REQ_PMU KVM_ARCH_REQ(2)
#define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -60,9 +61,13 @@ struct kvm_arch_memory_slot {
unsigned long flags;
};
+#define HOST_MAX_PMNUM 16
struct kvm_context {
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
+ /* Host PMU CSR */
+ u64 perf_ctrl[HOST_MAX_PMNUM];
+ u64 perf_cntr[HOST_MAX_PMNUM];
};
struct kvm_world_switch {
@@ -107,6 +112,8 @@ struct kvm_arch {
unsigned int root_level;
spinlock_t phyid_map_lock;
struct kvm_phyid_map *phyid_map;
+ /* Enabled PV features */
+ unsigned long pv_features;
s64 time_offset;
struct kvm_context __percpu *vmcs;
@@ -133,8 +140,15 @@ enum emulation_result {
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
-#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
-#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
+#define KVM_LARCH_LBT (0x1 << 3)
+#define KVM_LARCH_PMU (0x1 << 4)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 5)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 6)
+
+#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
+#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
+ BIT(KVM_FEATURE_STEAL_TIME) | \
+ BIT(KVM_FEATURE_VIRT_EXTIOI))
struct kvm_vcpu_arch {
/*
@@ -168,10 +182,14 @@ struct kvm_vcpu_arch {
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
+ struct loongarch_lbt lbt;
/* CSR state */
struct loongarch_csrs *csr;
+ /* Guest max PMU CSR id */
+ int max_pmu_csrid;
+
/* GPR used as IO source/target */
u32 io_gpr;
@@ -239,6 +257,21 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
return arch->cpucfg[2] & CPUCFG2_LASX;
}
+static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT);
+}
+
+static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
+{
+ return arch->cpucfg[6] & CPUCFG6_PMP;
+}
+
+static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
+{
+ return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
+}
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
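
kvm_get_pmu_num() above depends on the CPUCFG6_PMNUM_SHIFT constant added to loongarch.h later in this diff. A hedged, self-contained sketch of the extraction; the sample CPUCFG value is invented, and per the LoongArch reference manual the field holds the counter count minus one:

#include <stdio.h>

#define CPUCFG6_PMNUM		0xf0u	/* GENMASK(7, 4) */
#define CPUCFG6_PMNUM_SHIFT	4

int main(void)
{
	unsigned int cfg6 = 0x00000c75;	/* hypothetical CPUCFG[6] */
	unsigned int field = (cfg6 & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;

	printf("performance counters: %u\n", field + 1);
	return 0;
}
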
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 43ec61589e6c..c4e84227280d 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -2,6 +2,8 @@
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H
+#include <uapi/asm/kvm_para.h>
+
/*
* Hypercall code field
*/
@@ -154,10 +156,20 @@ static __always_inline long kvm_hypercall5(u64 fid,
return ret;
}
+#ifdef CONFIG_PARAVIRT
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
+#else
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
+#endif
static inline unsigned int kvm_arch_para_hints(void)
{
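
Under CONFIG_PARAVIRT the two prototypes above are satisfied by arch code that probes the hypervisor; the stubs keep !PARAVIRT builds compiling and always report "no hypervisor". A hedged sketch of a typical guest-side caller (the function name is hypothetical, kernel context assumed):

/* Hypothetical guest init fragment. */
static int __init guest_pv_probe(void)
{
	if (!kvm_para_available())
		return 0;	/* bare metal or non-KVM hypervisor */

	if (kvm_arch_para_features() & BIT(KVM_FEATURE_STEAL_TIME))
		pr_info("host advertises PV steal time\n");

	return 0;
}
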
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index 86570084e05a..d7e8f7d50ee0 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -75,6 +75,12 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+int kvm_own_lbt(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
+#endif
+
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
@@ -124,4 +130,9 @@ static inline bool kvm_pvtime_supported(void)
return !!sched_info_on();
}
+static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
+{
+ return vcpu->kvm->arch.pv_features & BIT(feature);
+}
+
#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
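
kvm_guest_has_pv_feature() tests the per-VM pv_features mask added to struct kvm_arch above, so hypercall emulation can be gated on what the VMM actually enabled. A hedged host-side fragment; kvm_send_pv_ipi() is a stand-in handler name:

/* Sketch of a hypercall dispatch fragment (assumption). */
if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI))
	ret = kvm_send_pv_ipi(vcpu);
else
	ret = KVM_HCALL_INVALID_CODE;	/* feature not negotiated */
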
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 4084cb64ba06..26542413a5b0 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -120,6 +120,7 @@
#define CPUCFG6_PMP BIT(0)
#define CPUCFG6_PAMVER GENMASK(3, 1)
#define CPUCFG6_PMNUM GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT 4
#define CPUCFG6_PMBITS GENMASK(13, 8)
#define CPUCFG6_UPM BIT(14)
@@ -161,16 +162,8 @@
/*
* CPUCFG index area: 0x40000000 -- 0x400000ff
- * SW emulation for KVM hypervirsor
+ * SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h
*/
-#define CPUCFG_KVM_BASE 0x40000000
-#define CPUCFG_KVM_SIZE 0x100
-
-#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0)
-#define KVM_SIGNATURE "KVM\0"
-#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
-#define KVM_FEATURE_IPI BIT(1)
-#define KVM_FEATURE_STEAL_TIME BIT(2)
#ifndef __ASSEMBLY__
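
The CPUCFG_KVM_* and KVM_FEATURE_* definitions move to arch/loongarch/include/uapi/asm/kvm_para.h so that guest kernel and userspace share one copy. A hedged sketch of the signature probe those constants serve; kvm_para_available() is expected to do roughly this:

/* Sketch, kernel context assumed; constants now come from UAPI. */
static bool probe_kvm_signature(void)
{
	u32 sig = read_cpucfg(CPUCFG_KVM_SIG);

	return !memcmp(&sig, KVM_SIGNATURE, 4);	/* "KVM\0" */
}
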
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
deleted file mode 100644
index 2b9a90727e19..000000000000
--- a/arch/loongarch/include/asm/mmzone.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Author: Huacai Chen (chenhuacai@loongson.cn)
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#include <asm/page.h>
-#include <asm/numa.h>
-
-extern struct pglist_data *node_data[];
-
-#define NODE_DATA(nid) (node_data[(nid)])
-
-#endif /* _ASM_MMZONE_H_ */
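
The arch header is retired in favor of a generic one pulled in through the generic-y entry added to Kbuild above. Assuming the generic replacement mirrors the deleted declarations, it reduces to:

/* Presumed shape of <asm-generic/mmzone.h> (assumption): */
extern struct pglist_data *node_data[];
#define NODE_DATA(nid)	(node_data[(nid)])
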
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
index dddec49671ae..3f4323603e6a 100644
--- a/arch/loongarch/include/asm/paravirt.h
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -19,6 +19,7 @@ static inline u64 paravirt_steal_clock(int cpu)
int __init pv_ipi_init(void);
int __init pv_time_init(void);
+int __init pv_spinlock_init(void);
#else
@@ -31,5 +32,11 @@ static inline int pv_time_init(void)
{
return 0;
}
+
+static inline int pv_spinlock_init(void)
+{
+ return 0;
+}
+
#endif // CONFIG_PARAVIRT
#endif
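
pv_spinlock_init() follows the pv_ipi_init()/pv_time_init() pattern: a no-op stub without PARAVIRT, a real probe with it. A hedged sketch of the PARAVIRT-side body, which presumably flips the static key declared in the new qspinlock.h below:

/* Sketch, not the verbatim implementation. */
int __init pv_spinlock_init(void)
{
	if (!cpu_has_hypervisor)
		return 0;

	static_branch_enable(&virt_spin_lock_key);

	return 0;
}
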
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
new file mode 100644
index 000000000000..e76d3aa1e1eb
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_QSPINLOCK_H
+#define _ASM_LOONGARCH_QSPINLOCK_H
+
+#include <linux/jump_label.h>
+
+#ifdef CONFIG_PARAVIRT
+
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+#define virt_spin_lock virt_spin_lock
+
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ int val;
+
+ if (!static_branch_unlikely(&virt_spin_lock_key))
+ return false;
+
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+__retry:
+ val = atomic_read(&lock->val);
+
+ if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+ cpu_relax();
+ goto __retry;
+ }
+
+ return true;
+}
+
+#endif /* CONFIG_PARAVIRT */
+
+#include <asm-generic/qspinlock.h>
+
+#endif // _ASM_LOONGARCH_QSPINLOCK_H
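
For context, the generic queued-lock slowpath consults virt_spin_lock() before queueing: returning false (key disabled, i.e. bare metal) keeps the fair queued lock, while returning true only after the test-and-set loop has taken the lock avoids the lock-holder preemption problem the comment describes. Paraphrased from kernel/locking/qspinlock.c:

/* Paraphrase of the hook point, not a verbatim quote. */
void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (virt_spin_lock(lock))
		return;

	/* ... regular pending-bit/MCS queueing follows ... */
}
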
diff --git a/arch/loongarch/include/asm/topology.h b/arch/loongarch/include/asm/topology.h
index 66128dec0bf6..50273c9187d0 100644
--- a/arch/loongarch/include/asm/topology.h
+++ b/arch/loongarch/include/asm/topology.h
@@ -8,6 +8,7 @@
#include <linux/smp.h>
#ifdef CONFIG_NUMA
+#include <asm/numa.h>
extern cpumask_t cpus_on_node[];
diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h
new file mode 100644
index 000000000000..02f36772541b
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/getrandom.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Xi Ruoyao <xry111@xry111.site>. All Rights Reserved.
+ */
+#ifndef __ASM_VDSO_GETRANDOM_H
+#define __ASM_VDSO_GETRANDOM_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/vdso/vdso.h>
+
+static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, unsigned int _flags)
+{
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_getrandom;
+ register void *buffer asm("a0") = _buffer;
+ register size_t len asm("a1") = _len;
+ register unsigned int flags asm("a2") = _flags;
+
+ asm volatile(
+ " syscall 0\n"
+ : "+r" (ret)
+ : "r" (nr), "r" (buffer), "r" (len), "r" (flags)
+ : "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ "memory");
+
+ return ret;
+}
+
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+{
+ return (const struct vdso_rng_data *)(get_vdso_data() + VVAR_LOONGARCH_PAGES_START *
+ PAGE_SIZE + offsetof(struct loongarch_vdso_data, rng_data));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETRANDOM_H */
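
Note that `ret` and `buffer` are both bound to $a0: the LoongArch syscall ABI reuses the first argument register for the return value, and the $t0-$t8 clobbers reflect that the kernel does not preserve temporaries across `syscall`. A hedged usage sketch; the generic vDSO getrandom() falls back to this raw syscall when its per-thread state cannot be used:

/* Usage sketch (assumption: error handling elided). */
unsigned char buf[32];
ssize_t n = getrandom_syscall(buf, sizeof(buf), 0);

if (n != sizeof(buf))
	;	/* short read, or -errno encoded in n */
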
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
index 5a12309d9fb5..e31ac7474513 100644
--- a/arch/loongarch/include/asm/vdso/vdso.h
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -4,6 +4,9 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#ifndef _ASM_VDSO_VDSO_H
+#define _ASM_VDSO_VDSO_H
+
#ifndef __ASSEMBLY__
#include <asm/asm.h>
@@ -16,6 +19,7 @@ struct vdso_pcpu_data {
struct loongarch_vdso_data {
struct vdso_pcpu_data pdata[NR_CPUS];
+ struct vdso_rng_data rng_data;
};
/*
@@ -63,3 +67,5 @@ static inline unsigned long get_vdso_data(void)
}
#endif /* __ASSEMBLY__ */
+
+#endif
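
The new _ASM_VDSO_VDSO_H guard matters because the header is now reachable from more than one place (getrandom.h above includes it). A trivial illustration:

/* With the guard, repeated inclusion is harmless: */
#include <asm/vdso/vdso.h>
#include <asm/vdso/vdso.h>	/* second include is a no-op */
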
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
index 5de615383a22..b1273ce6f140 100644
--- a/arch/loongarch/include/asm/vdso/vsyscall.h
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -8,6 +8,7 @@
#include <vdso/datapage.h>
extern struct vdso_data *vdso_data;
+extern struct vdso_rng_data *vdso_rng_data;
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
@@ -19,6 +20,13 @@ struct vdso_data *__loongarch_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
+static __always_inline
+struct vdso_rng_data *__loongarch_get_k_vdso_rng_data(void)
+{
+ return vdso_rng_data;
+}
+#define __arch_get_k_vdso_rng_data __loongarch_get_k_vdso_rng_data
+
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
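
This kernel-side accessor pairs with __arch_get_vdso_rng_data() in getrandom.h above: the kernel writes through one mapping, the vDSO reads through the other. A hedged sketch of the writer, paraphrasing the generic CONFIG_VDSO_GETRANDOM reseed path; next_gen stands for the new base CRNG generation (assumption):

/* Paraphrased writer-side sketch, kernel context assumed. */
struct vdso_rng_data *rng = __arch_get_k_vdso_rng_data();

smp_store_release(&rng->generation, next_gen + 1);
if (!READ_ONCE(rng->is_ready))
	WRITE_ONCE(rng->is_ready, true);
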