/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"
c86c7721 EA |

/* common helpers */

/*
 * vgic_check_ioaddr - sanity-check a guest-physical MMIO base address
 * @kvm:       the VM (unused here, kept for symmetry with callers)
 * @ioaddr:    slot holding the currently configured address
 * @addr:      the address userspace wants to set
 * @alignment: required alignment of @addr
 *
 * Returns 0 on success, -E2BIG if the address does not fit in the guest
 * PA space, -EINVAL if misaligned, or -EEXIST if the slot was already set.
 */
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
		      phys_addr_t addr, phys_addr_t alignment)
{
	/* Must be representable in the guest physical address space. */
	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (!IS_ALIGNED(addr, alignment))
		return -EINVAL;

	/* Refuse double assignment: the slot may only be written once. */
	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;

	return 0;
}
39 | ||
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, alignment;

	mutex_lock(&kvm->lock);
	/* Map the addr type to its storage slot, GIC model and alignment. */
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		alignment = SZ_64K;
		break;
	default:
		r = -ENODEV;
		goto out;
	}

	/* The requested addr type must match the VM's GIC model. */
	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
		if (!r)
			*addr_ptr = *addr;
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
107 | ||
fca25602 EA |
/*
 * vgic_set_common_attr - handle SET_DEVICE_ATTR groups shared by v2 and v3
 * @dev:  kvm device handle
 * @attr: kvm device attribute from userspace
 *
 * Handles the ADDR, NR_IRQS and CTRL(INIT) groups.  Returns -ENXIO for any
 * group it does not handle, so per-version callers can fall through to
 * their own handling.
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		/* -ENODEV from kvm_vgic_addr means "unknown attr" here. */
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		/* nr_spis may only be set once, and never after vgic init. */
		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
170 | ||
/*
 * vgic_get_common_attr - handle GET_DEVICE_ATTR groups shared by v2 and v3
 * @dev:  kvm device handle
 * @attr: kvm device attribute from userspace
 *
 * Returns -ENXIO for unhandled groups so callers can fall through.
 */
static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			/* -ENODEV from kvm_vgic_addr means "unknown attr". */
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		/* Report the total IRQ count, private IRQs included. */
		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}
201 | ||
c86c7721 EA |
/* KVM device op: create a vgic of the requested type for this VM. */
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
206 | ||
/*
 * KVM device op: release the device handle only; the vgic state itself
 * lives in struct kvm and is torn down with the VM.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
211 | ||
/*
 * kvm_register_vgic_device - register the KVM device ops for a GIC model
 * @type: KVM_DEV_TYPE_ARM_VGIC_V2 or KVM_DEV_TYPE_ARM_VGIC_V3
 *
 * For GICv3 the ITS device is registered as well, but only if registering
 * the vgic-v3 ops itself succeeded.  Returns -ENODEV for unknown types.
 */
int kvm_register_vgic_device(unsigned long type)
{
	int ret = -ENODEV;

	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V2);
		break;
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V3);

		if (ret)
			break;
		ret = kvm_vgic_register_its_device();
		break;
	}

	return ret;
}
233 | ||
94574c94 VK |
234 | int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, |
235 | struct vgic_reg_attr *reg_attr) | |
ba7b9169 CD |
236 | { |
237 | int cpuid; | |
238 | ||
239 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> | |
240 | KVM_DEV_ARM_VGIC_CPUID_SHIFT; | |
241 | ||
242 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) | |
243 | return -EINVAL; | |
244 | ||
245 | reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid); | |
246 | reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | |
247 | ||
248 | return 0; | |
249 | } | |
250 | ||
251 | /* unlocks vcpus from @vcpu_lock_idx and smaller */ | |
252 | static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) | |
253 | { | |
254 | struct kvm_vcpu *tmp_vcpu; | |
255 | ||
256 | for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { | |
257 | tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); | |
258 | mutex_unlock(&tmp_vcpu->mutex); | |
259 | } | |
260 | } | |
261 | ||
/* Drop the vcpu mutexes taken by a successful lock_all_vcpus(). */
static void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}
266 | ||
/* Returns true if all vcpus were locked, false otherwise */
static bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	int c;

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run and fiddle with the vgic state while we
	 * access it.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			/* Roll back every mutex taken so far and bail out. */
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}
288 | ||
/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address the value is read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    u32 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	mutex_lock(&dev->kvm->lock);

	/* Register access forces initialization of the vgic (v2 only). */
	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	/* Keep every VCPU from running while we touch the vgic state. */
	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
341 | ||
c86c7721 EA |
/*
 * KVM device op: SET_DEVICE_ATTR for the vgic-v2 device.
 * Common groups are tried first; -ENXIO means "not mine, try v2 regs".
 */
static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_v2_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}
366 | ||
/*
 * KVM device op: GET_DEVICE_ATTR for the vgic-v2 device.
 * Common groups are tried first; -ENXIO means "not mine, try v2 regs".
 */
static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}
391 | ||
392 | static int vgic_v2_has_attr(struct kvm_device *dev, | |
393 | struct kvm_device_attr *attr) | |
394 | { | |
fca25602 | 395 | switch (attr->group) { |
e5c30294 EA |
396 | case KVM_DEV_ARM_VGIC_GRP_ADDR: |
397 | switch (attr->attr) { | |
398 | case KVM_VGIC_V2_ADDR_TYPE_DIST: | |
399 | case KVM_VGIC_V2_ADDR_TYPE_CPU: | |
400 | return 0; | |
401 | } | |
402 | break; | |
f94591e2 EA |
403 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: |
404 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | |
405 | return vgic_v2_has_attr_regs(dev, attr); | |
fca25602 EA |
406 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: |
407 | return 0; | |
afcc7c50 EA |
408 | case KVM_DEV_ARM_VGIC_GRP_CTRL: |
409 | switch (attr->attr) { | |
410 | case KVM_DEV_ARM_VGIC_CTRL_INIT: | |
411 | return 0; | |
412 | } | |
fca25602 | 413 | } |
c86c7721 EA |
414 | return -ENXIO; |
415 | } | |
416 | ||
/* Device ops table exposed to KVM for the GICv2 in-kernel device. */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
425 | ||
94574c94 VK |
426 | int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, |
427 | struct vgic_reg_attr *reg_attr) | |
428 | { | |
429 | unsigned long vgic_mpidr, mpidr_reg; | |
430 | ||
431 | /* | |
432 | * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group, | |
433 | * attr might not hold MPIDR. Hence assume vcpu0. | |
434 | */ | |
435 | if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) { | |
436 | vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >> | |
437 | KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT; | |
438 | ||
439 | mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr); | |
440 | reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg); | |
441 | } else { | |
442 | reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0); | |
443 | } | |
444 | ||
445 | if (!reg_attr->vcpu) | |
446 | return -EINVAL; | |
447 | ||
448 | reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | |
449 | ||
450 | return 0; | |
451 | } | |
452 | ||
/*
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address the value is read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    u64 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 tmp32;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	mutex_lock(&dev->kvm->lock);

	/* Unlike v2, v3 register access requires an already-initialized vgic. */
	if (unlikely(!vgic_initialized(dev->kvm))) {
		ret = -EBUSY;
		goto out;
	}

	/* Keep every VCPU from running while we touch the vgic state. */
	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		/* Distributor registers are 32 bits wide; narrow via tmp32. */
		if (is_write)
			tmp32 = *reg;

		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
		if (!is_write)
			*reg = tmp32;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		if (is_write)
			tmp32 = *reg;

		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
		if (!is_write)
			*reg = tmp32;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
517 | ||
c86c7721 EA |
/*
 * KVM device op: SET_DEVICE_ATTR for the vgic-v3 device.
 * Common groups are tried first; -ENXIO means "not mine, try v3 regs".
 */
static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 tmp32;
		u64 reg;

		/* Userspace hands us a 32-bit value; widen for the accessor. */
		if (get_user(tmp32, uaddr))
			return -EFAULT;

		reg = tmp32;
		return vgic_v3_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
543 | ||
/*
 * KVM device op: GET_DEVICE_ATTR for the vgic-v3 device.
 * Common groups are tried first; -ENXIO means "not mine, try v3 regs".
 */
static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u64 reg;
		u32 tmp32;

		ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		/* Registers in these groups are 32 bits; narrow for userspace. */
		tmp32 = reg;
		return put_user(tmp32, uaddr);
	}
	}

	return -ENXIO;
}
570 | ||
571 | static int vgic_v3_has_attr(struct kvm_device *dev, | |
572 | struct kvm_device_attr *attr) | |
573 | { | |
fca25602 | 574 | switch (attr->group) { |
e5c30294 EA |
575 | case KVM_DEV_ARM_VGIC_GRP_ADDR: |
576 | switch (attr->attr) { | |
577 | case KVM_VGIC_V3_ADDR_TYPE_DIST: | |
578 | case KVM_VGIC_V3_ADDR_TYPE_REDIST: | |
579 | return 0; | |
580 | } | |
581 | break; | |
94574c94 VK |
582 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: |
583 | case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: | |
584 | return vgic_v3_has_attr_regs(dev, attr); | |
fca25602 EA |
585 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: |
586 | return 0; | |
afcc7c50 EA |
587 | case KVM_DEV_ARM_VGIC_GRP_CTRL: |
588 | switch (attr->attr) { | |
589 | case KVM_DEV_ARM_VGIC_CTRL_INIT: | |
590 | return 0; | |
591 | } | |
fca25602 | 592 | } |
c86c7721 EA |
593 | return -ENXIO; |
594 | } | |
595 | ||
/* Device ops table exposed to KVM for the GICv3 in-kernel device. */
struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};