1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2023 Loongson Technology Corporation Limited
*/
#ifndef __ASM_LOONGARCH_KVM_HOST_H__
#define __ASM_LOONGARCH_KVM_HOST_H__
#include <linux/cpumask.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <asm/inst.h>
#include <asm/kvm_mmu.h>
#include <asm/loongarch.h>
/* Loongarch KVM register ids */
/*
 * Extract the CSR / CPUCFG register index from a KVM_{GET,SET}_ONE_REG id.
 * The argument is fully parenthesized so that expression arguments
 * (e.g. "base | off") expand correctly.
 */
#define KVM_GET_IOC_CSR_IDX(id)		(((id) & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
#define KVM_GET_IOC_CPUCFG_IDX(id)	(((id) & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)

/* Maximum number of virtual CPUs per VM */
#define KVM_MAX_VCPUS			256
/* Number of emulated CPUCFG registers kept per vCPU */
#define KVM_MAX_CPUCFG_REGS		21

/* Memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS		0

/* Default halt-polling interval, in nanoseconds */
#define KVM_HALT_POLL_NS_DEFAULT	500000
/* Per-VM statistics exported through the KVM stats interface */
struct kvm_vm_stat {
/* Generic VM statistics common to all architectures */
struct kvm_vm_stat_generic generic;
/* Presumably counts of base/huge pages mapped for the guest — confirm against the MMU code */
u64 pages;
u64 hugepages;
};
/* Per-vCPU statistics exported through the KVM stats interface */
struct kvm_vcpu_stat {
/* Generic vCPU statistics common to all architectures */
struct kvm_vcpu_stat_generic generic;
/* Guest exit counters, broken down by exit reason */
u64 int_exits;
u64 idle_exits;
u64 cpucfg_exits;
u64 signal_exits;
};
/* LoongArch keeps no architecture-specific data per memory slot */
struct kvm_arch_memory_slot {
};
/*
 * Per-physical-CPU virtualization context (allocated per-cpu via
 * kvm_arch.vmcs below).
 */
struct kvm_context {
/* Presumably the last VPID handed out on this CPU — confirm against kvm_check_vpid() */
unsigned long vpid_cache;
/* vCPU whose state was most recently loaded on this CPU */
struct kvm_vcpu *last_vcpu;
};
/*
 * Entry points of the world-switch code (see kvm_loongarch_ops below).
 */
struct kvm_world_switch {
/* Exception entry used while a guest is running */
int (*exc_entry)(void);
/* Transfer control from host to guest context */
int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
/* NOTE(review): looks like the allocation order of the switch-code pages — confirm */
unsigned long page_order;
};
/* Maximum depth of the guest physical page table */
#define MAX_PGTABLE_LEVELS 4
/* Per-VM architecture state */
struct kvm_arch {
/* Guest physical mm */
kvm_pte_t *pgd;
/* Size of the guest physical address space */
unsigned long gpa_size;
/* Per-level invalid-PTE markers and shift values for GPA page-table walks */
unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
unsigned int root_level;
/* Guest/host time offset — presumably applied to the stable timer; confirm */
s64 time_offset;
/* Per-physical-CPU virtualization contexts */
struct kvm_context __percpu *vmcs;
};
/* Number of software-shadowed guest CSRs */
#define CSR_MAX_NUMS 0x800
/* Software copy of the guest CSR file, indexed by CSR number */
struct loongarch_csrs {
unsigned long csrs[CSR_MAX_NUMS];
};
/* Resume Flags: where execution continues after handling a guest exit */
#define RESUME_HOST 0
#define RESUME_GUEST 1
/* Outcome of instruction emulation */
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
EMULATE_DO_IOCSR, /* handle IOCSR request */
EMULATE_FAIL, /* can't emulate this instruction */
EMULATE_EXCEPT, /* A guest exception has been generated */
};
/* Flags for kvm_vcpu_arch.aux_inuse: which lazily-managed state is live */
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
/* Per-vCPU architecture state; some fields are accessed by offset from assembly */
struct kvm_vcpu_arch {
/*
* Switch pointer-to-function type to unsigned long
* for loading the value into register directly.
*/
unsigned long host_eentry;
unsigned long guest_eentry;
/* Pointers stored here for easy accessing from assembly code */
int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
/* Host registers preserved across guest mode execution */
unsigned long host_sp;
unsigned long host_tp;
unsigned long host_pgd;
/* Host CSRs are used when handling exits from guest */
unsigned long badi;
unsigned long badv;
unsigned long host_ecfg;
unsigned long host_estat;
unsigned long host_percpu;
/* GPRs */
unsigned long gprs[32];
unsigned long pc;
/* Which auxiliary state is loaded (KVM_LARCH_*) */
unsigned int aux_inuse;
/* FPU state */
struct loongarch_fpu fpu FPU_ALIGN;
/* CSR state */
struct loongarch_csrs *csr;
/* GPR used as IO source/target */
u32 io_gpr;
/* KVM register to control count timer */
u32 count_ctl;
/* Software timer emulating the guest stable timer */
struct hrtimer swtimer;
/* Bitmask of intr that are pending */
unsigned long irq_pending;
/* Bitmask of pending intr to be cleared */
unsigned long irq_clear;
/* Bitmask of exceptions that are pending */
unsigned long exception_pending;
/* Presumably the subcode of the pending exception — confirm against injection code */
unsigned int esubcode;
/* Cache for pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
/* vcpu's vpid */
u64 vpid;
/* Frequency of stable timer in Hz */
/* NOTE(review): field name says "mhz" but comment says Hz — confirm the unit */
u64 timer_mhz;
/* Expiry time of the emulated stable timer (swtimer) */
ktime_t expire;
/* Last CPU the vCPU state was loaded on */
int last_sched_cpu;
/* mp state */
struct kvm_mp_state mp_state;
/* cpucfg */
u32 cpucfg[KVM_MAX_CPUCFG_REGS];
};
/* Read the software-shadowed value of guest CSR @reg from @csr. */
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
        unsigned long val;

        val = csr->csrs[reg];
        return val;
}
/* Store @val as the software-shadowed value of guest CSR @reg in @csr. */
static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
{
        unsigned long *slot = &csr->csrs[reg];

        *slot = val;
}
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* MMU handling */
/* Flush the entire guest TLB */
void kvm_flush_tlb_all(void);
/* Flush guest TLB entries covering guest physical address @gpa */
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
/* Handle a guest memory fault at @badv; @write distinguishes store from load faults */
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
/* MMU notifier hooks so host mm events are propagated to guest mappings */
#define KVM_ARCH_WANT_MMU_NOTIFIER
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* Advance the guest PC past the just-handled 4-byte instruction. */
static inline void update_pc(struct kvm_vcpu_arch *arch)
{
        arch->pc = arch->pc + 4;
}
/*
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @arch: Architecture state of the virtual CPU taking the fault.
 *
 * Returns: Whether the TLBL exception was likely due to an instruction
 * fetch fault rather than a data load fault.
 *
 * A faulting address (badv) equal to the current PC means the fault was
 * raised while fetching the instruction itself.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
        return arch->pc == arch->badv;
}
/* Misc */
/* Generic-KVM arch callbacks that need no work on LoongArch: empty stubs */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
/* Validate/refresh the vCPU's VPID before entering the guest */
void kvm_check_vpid(struct kvm_vcpu *vcpu);
/* hrtimer callback for the emulated stable timer (swtimer) */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
/* World-switch entry points implemented in assembly */
void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern unsigned long vpid_mask;
/* Sizes of the assembly world-switch code blobs, for copying/mapping them */
extern const unsigned long kvm_exception_size;
extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;
/* Classification flags returned by get_gcsr_flag() for a CSR number */
#define SW_GCSR (1 << 0)
#define HW_GCSR (1 << 1)
#define INVALID_GCSR (1 << 2)
int get_gcsr_flag(int csr);
/* Write @val directly to the hardware guest CSR @csr_id */
void set_hw_gcsr(int csr_id, unsigned long val);
#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
|