// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/kvm_vcpu_vector.h>

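/*
 * VCPU statistics exposed through the KVM binary stats interface;
 * the offsets in kvm_vcpu_stats_header below lay the file out as
 * header, id string, descriptors, then data words.
 */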
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because this function races
	 * with kvm_sched_out()/kvm_sched_in() (called from preempt
	 * notifiers), which also call vcpu_put()/vcpu_load().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_vector_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	kvm_riscv_vcpu_aia_reset(vcpu);

	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	kvm_riscv_vcpu_pmu_reset(vcpu);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for the hotplug use case */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int rc;
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	kvm_riscv_vcpu_setup_isa(vcpu);

	/* Setup vendor, arch, and implementation details */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
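	/*
	 * SPV and SPVP make the first sret enter the guest as S-mode
	 * code running in VS-mode, while VTW lets KVM intercept guest
	 * WFI instead of the hart idling inside the guest.
	 */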
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
		return -ENOMEM;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Setup performance monitoring */
	kvm_riscv_vcpu_pmu_init(vcpu);

	/* Setup VCPU AIA */
	rc = kvm_riscv_vcpu_aia_init(vcpu);
	if (rc)
		return rc;

	/*
	 * Setup SBI extensions
	 * NOTE: This must be the last thing to be initialized.
	 */
	kvm_riscv_vcpu_sbi_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU with id 0 is the designated boot CPU.
	 * Keep all VCPUs with a non-zero id in the power-off state so
	 * that they can be brought up using the SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU AIA context */
	kvm_riscv_vcpu_aia_deinit(vcpu);

	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	kvm_riscv_vcpu_pmu_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);

	/* Free vector context space for host and guest kernel */
	kvm_riscv_vcpu_free_vector_context(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}

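/*
 * Called by generic KVM code when this VCPU starts/stops blocking
 * (e.g. on guest WFI); arm or disarm wakeup on host guest-external
 * interrupts so a pending AIA interrupt can unblock the VCPU.
 */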
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_aia_wakeon_hgei(vcpu, true);
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_aia_wakeon_hgei(vcpu, false);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
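	/*
	 * KVM_GET_REG_LIST follows the usual two-call protocol:
	 * userspace passes its buffer capacity in reg_list.n, the
	 * kernel writes back the real count, and -E2BIG tells the
	 * caller to retry with a larger buffer.
	 */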
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

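	/*
	 * Atomically claim the bits that changed since the last flush
	 * and fold them into the shadow HVIP, which is written to the
	 * real CSR just before entering the guest.
	 */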
	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}

	/* Flush AIA high interrupts */
	kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
		}
	}

	/* Sync-up AIA high interrupts */
	kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

	/* Sync-up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privileged specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, vcpu->arch.irqs_pending);
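	/*
	 * Order the pending bit before the mask bit so that
	 * kvm_riscv_vcpu_flush_interrupts(), which claims the mask
	 * with xchg_acquire(), observes the new pending value.
	 */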
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privileged specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

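	/*
	 * Shift the enabled VSIE bits into their HVIP positions and
	 * test them against the software-maintained pending bitmap.
	 */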
	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
	      << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
	      (unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	/* Check AIA high interrupts */
	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}

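/*
 * Compute the HENVCFG/HSTATEEN0 values for this VCPU once, based on the
 * ISA extensions available to the guest; kvm_arch_vcpu_load() writes
 * them into the CSRs on every load.
 */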
static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
{
	const unsigned long *isa = vcpu->arch.isa;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	if (riscv_isa_extension_available(isa, SVPBMT))
		cfg->henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		cfg->henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		cfg->henvcfg |= ENVCFG_CBZE;

	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
		if (riscv_isa_extension_available(isa, SSAIA))
			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
					  SMSTATEEN0_AIA |
					  SMSTATEEN0_AIA_ISEL;
		if (riscv_isa_extension_available(isa, SMSTATEEN))
			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
	}
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

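	/* Restore guest VS-level CSRs from their software shadow copies */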
	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);
	csr_write(CSR_HENVCFG, cfg->henvcfg);
	if (IS_ENABLED(CONFIG_32BIT))
		csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
		csr_write(CSR_HSTATEEN0, cfg->hstateen0);
		if (IS_ENABLED(CONFIG_32BIT))
			csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
	}

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
					    vcpu->arch.isa);

	kvm_riscv_vcpu_aia_load(vcpu, cpu);

	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_aia_put(vcpu);

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);
	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
					 vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);

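	/* Save guest VS-level CSRs into their software shadow copies */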
	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}

static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
	kvm_riscv_vcpu_aia_update_hvip(vcpu);
}

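/*
 * SENVCFG (and SSTATEEN0 when Smstateen is available) are swapped in
 * these helpers rather than in the low-level __kvm_riscv_switch_to()
 * path; they run on every guest entry/exit with interrupts disabled.
 */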
static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
						     smcsr->sstateen0);
}

static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
					    vcpu->arch.host_sstateen0);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
	kvm_riscv_vcpu_swap_in_host_state(vcpu);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	if (!vcpu->arch.ran_atleast_once)
		kvm_riscv_vcpu_setup_config(vcpu);

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

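	/*
	 * Complete any in-flight exit first: values returned by
	 * userspace for MMIO, SBI, and CSR exits must be folded back
	 * into guest state before the guest can run again.
	 */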
	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			continue;
		ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		/* Update AIA HW state before entering guest */
		ret = kvm_riscv_vcpu_aia_update(vcpu);
		if (ret <= 0) {
			preempt_enable();
			continue;
		}

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * VCPU interrupts might have been updated asynchronously,
		 * so reflect them into the HW state.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Cleanup stale TLB entries
		 *
		 * Note: This should be done after the G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_local_tlb_sanitize(vcpu);

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}