// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
};

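/*
 * Map a host ISA extension ID (RISCV_ISA_EXT_*) back to its KVM ISA
 * extension ID, or return KVM_RISCV_ISA_EXT_MAX if there is no mapping.
 */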
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

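/*
 * H is never allowed to be enabled for a vCPU; V is allowed only when
 * userspace control of vector state permits it.
 */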
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

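/*
 * Check whether the given ISA extension may be disabled for this vCPU;
 * many extensions provide no mechanism to be hidden from the guest.
 */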
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	default:
		break;
	}

	return true;
}

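/*
 * Set the default vCPU ISA bitmap: enable every extension that is known
 * to KVM, available on the host, and allowed to be enabled.
 */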
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

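/*
 * KVM_GET_ONE_REG handler for the CONFIG register space (ISA bitmap,
 * cache-block sizes, machine ID registers and satp mode).
 */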
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

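/*
 * KVM_SET_ONE_REG handler for the CONFIG register space. The ISA bitmap
 * and machine ID registers may only change before the vCPU first runs;
 * the block sizes and satp mode are read-only and merely validated here.
 */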
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

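/*
 * The CORE register space covers sepc, the general purpose registers and
 * the privilege mode, matching the layout of struct kvm_riscv_core; GPRs
 * are accessed by indexing straight into the guest context.
 */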
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

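/*
 * Guest sip is not stored directly: pending interrupts live in hvip, so
 * reads flush the soft-maintained interrupt state and translate hvip to
 * the sip layout, while writes update hvip and clear the pending mask.
 */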
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

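/*
 * CSR registers carry a subtype in their register number that selects
 * the general, AIA or Smstateen CSR handlers; Smstateen CSRs are only
 * reachable when the host implements the Smstateen extension.
 */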
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

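/*
 * Single ISA extension registers read back 0/1 availability and may only
 * be toggled before the vCPU first runs, and then only where the
 * extension allows it.
 */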
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

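/*
 * MULTI registers pack up to BITS_PER_LONG extension states into one
 * register; each set bit is forwarded to the single-extension helpers.
 */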
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

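/*
 * The copy_*_reg_indices() helpers below serve double duty: with a NULL
 * uindices pointer they only count the registers that would be reported,
 * which is how the num_*_regs() wrappers are implemented.
 */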
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
	     i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * was not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n - 1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static inline unsigned long num_sbi_ext_regs(void)
{
	/*
	 * number of KVM_REG_RISCV_SBI_SINGLE +
	 * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
	 */
	return KVM_RISCV_SBI_EXT_MAX + 2 * (KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1);
}

static int copy_sbi_ext_reg_indices(u64 __user *uindices)
{
	int n;

	/* copy KVM_REG_RISCV_SBI_SINGLE */
	n = KVM_RISCV_SBI_EXT_MAX;
	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy KVM_REG_RISCV_SBI_MULTI */
	n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_MULTI_EN | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
		      KVM_REG_RISCV_SBI_MULTI_DIS | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return num_sbi_ext_regs();
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs();

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(uindices);
	if (ret < 0)
		return ret;

	return 0;
}

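/*
 * Top-level KVM_SET_ONE_REG/KVM_GET_ONE_REG dispatch: the register type
 * encoded in reg->id selects the per-class handler implemented above.
 */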
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}