/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/* common helpers */

int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
		      phys_addr_t addr, phys_addr_t alignment)
{
	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (!IS_ALIGNED(addr, alignment))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;

	return 0;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
		if (!r)
			*addr_ptr = *addr;
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}

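/*
 * Illustrative userspace usage (a hedged sketch, not part of this file):
 * the base addresses above are programmed through the KVM device
 * attribute API. "vgic_fd" and the 0x08000000 base are assumed example
 * values, not anything defined by this code.
 *
 *	__u64 dist_base = 0x08000000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr  = (__u64)(unsigned long)&dist_base,
 *	};
 *
 *	if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr))
 *		perror("KVM_SET_DEVICE_ATTR");
 */
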
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}

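/*
 * Illustrative userspace usage (a hedged sketch, not part of the kernel
 * build): configuring the number of IRQs before the vgic is initialized.
 * 128 = 16 SGIs + 16 PPIs + 96 SPIs satisfies the constraints checked
 * above (at least 32 SPIs on top of the private IRQs, a multiple of 32,
 * within the maximum). "vgic_fd" is an assumed device fd.
 *
 *	__u32 nr_irqs = 128;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.addr  = (__u64)(unsigned long)&nr_irqs,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
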
static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
	int ret = -ENODEV;

	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V2);
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V3);
		break;
#endif
	}

	return ret;
}

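/*
 * Illustrative userspace usage (a hedged sketch, not part of this file):
 * the device ops registered above are instantiated by userspace with
 * KVM_CREATE_DEVICE on the VM fd; the returned fd is what the examples
 * in this file call "vgic_fd". "vm_fd" is an assumed VM file descriptor.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *		perror("KVM_CREATE_DEVICE");
 *	vgic_fd = cd.fd;
 */
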
/**
 * vgic_attr_regs_access - allow userspace to read or write VGIC registers
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address of the value to be read or written
 * @is_write: write flag
 */
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	gpa_t addr;
	int cpuid, ret, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	int vcpu_lock_idx = -1;

	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run and fiddle with the vgic state while we
	 * access it.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex))
			goto out;
		vcpu_lock_idx = c;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}

	mutex_unlock(&dev->kvm->lock);
	return ret;
}

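/*
 * Illustrative userspace usage (a hedged sketch, not part of the kernel
 * build): a register access encodes the target vcpu in the CPUID field
 * of attr->attr and the register offset in the lower bits, as decoded
 * above. The 0x8 offset (GICD_IIDR) and "vgic_fd" are example values.
 *
 *	__u32 reg;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *		.attr  = (0ULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x8,
 *		.addr  = (__u64)(unsigned long)&reg,
 *	};
 *
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */
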
/* V2 ops */

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return vgic_v2_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

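/*
 * Illustrative userspace usage (a hedged sketch): the has_attr callbacks
 * back KVM_HAS_DEVICE_ATTR, so userspace can probe for support before
 * touching an attribute. "vgic_fd" and "attr" are assumed example values.
 *
 *	if (ioctl(vgic_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		;	/* attribute is supported */
 */
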
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};

/* V3 ops */

#ifdef CONFIG_KVM_ARM_VGIC_V3

static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	return vgic_set_common_attr(dev, attr);
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	return vgic_get_common_attr(dev, attr);
}

static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};

#endif /* CONFIG_KVM_ARM_VGIC_V3 */