/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

/* Arch-specific VCPU request numbers (see KVM_ARCH_REQ*() in kvm_host.h) */
#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I \
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
/* Flushing all G-stage (GVMA) mappings reuses the generic TLB-flush request */
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL \
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
/* Kind of guest TLB-flush (HFENCE) operation carried by a queued request */
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};
52 | ||
/* One queued HFENCE request (stored in hfence_queue of struct kvm_vcpu_arch) */
struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;	/* used by the VVMA_ASID_* types */
	unsigned long order;	/* log2 of the page size covered by the flush */
	gpa_t addr;		/* start of the address range to flush */
	gpa_t size;		/* length of the address range to flush */
};

/* Depth of each VCPU's HFENCE request ring (see hfence_head/hfence_tail) */
#define KVM_RISCV_VCPU_MAX_HFENCE	64
99cdc6c1 AP |
62 | |
/* Per-VM statistics; RISC-V adds nothing beyond the generic KVM stats */
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};
66 | ||
/* Per-VCPU statistics exported through the KVM stats interface */
struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;	/* MMIO exits forwarded to userspace */
	u64 mmio_exit_kernel;	/* MMIO exits handled in the kernel */
	u64 csr_exit_user;	/* CSR-access exits forwarded to userspace */
	u64 csr_exit_kernel;	/* CSR-access exits handled in the kernel */
	u64 signal_exits;
	u64 exits;		/* total number of guest exits */
};
78 | ||
/* RISC-V keeps no arch-specific per-memslot state */
struct kvm_arch_memory_slot {
};
81 | ||
/* Guest VMID together with the allocator generation it was taken from */
struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
90 | ||
/* Per-VM architecture state */
struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;	/* physical address of pgd, programmed into HGATP */

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};
105 | ||
/* Snapshot of the supervisor/hypervisor trap CSRs describing a guest trap */
struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};
113 | ||
/*
 * Full general-purpose register file plus sepc/sstatus/hstatus and the
 * FP/Vector extension state; one instance each for host and guest.
 */
struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};
153 | ||
/* Guest VS-level CSR state saved/restored around VCPU world switches */
struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
	unsigned long senvcfg;
};
167 | ||
/* 'static' VCPU configuration values that are computed only once */
struct kvm_vcpu_config {
	u64 henvcfg;
	u64 hstateen0;
};
172 | ||
/* Guest Smstateen extension CSR state */
struct kvm_vcpu_smstateen_csr {
	unsigned long sstateen0;
};
176 | ||
/* Per-VCPU architecture state */
struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU Smstateen CSR context of Guest VCPU */
	struct kvm_vcpu_smstateen_csr smstateen_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represent
	 * pending interrupts whereas irqs_pending_mask represent bits changed
	 * in irqs_pending. Our approach is modeled around multiple producer
	 * and single consumer problem where the consumer is the VCPU itself.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue (ring buffer guarded by hfence_lock) */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;

	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;
};
266 | ||
/* Generic KVM hooks that need no architecture-specific work on RISC-V */
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
99cdc6c1 | 269 | |
#define KVM_ARCH_WANT_MMU_NOTIFIER

/* Minimum page-size order (4 KiB) accepted by the G-stage TLB flush helpers */
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

/* HFENCE helpers acting on the local (current) host CPU only */
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
/* Per-VCPU handlers for queued fence/HFENCE requests */
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

/* Issue fences/HFENCEs to a set of VCPUs selected by hbase/hmask */
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
/* G-stage (guest-physical) page-table management */
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

/* G-stage VMID allocation and update */
void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
99cdc6c1 | 342 | |
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

/* Trap landing pad used while accessing guest memory in unprivileged mode */
void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

/* Low-level host<->guest context switch */
void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

/* ONE_REG interface: enumerate and get/set guest registers */
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);

/* VCPU interrupt injection and power management */
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */