]> git.ipfire.org Git - thirdparty/linux.git/blame - virt/kvm/arm/vgic/vgic-init.c
treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 234
[thirdparty/linux.git] / virt / kvm / arm / vgic / vgic-init.c
CommitLineData
caab277b 1// SPDX-License-Identifier: GPL-2.0-only
90977732
EA
2/*
3 * Copyright (C) 2015, 2016 ARM Ltd.
90977732
EA
4 */
5
6#include <linux/uaccess.h>
7#include <linux/interrupt.h>
8#include <linux/cpu.h>
9#include <linux/kvm_host.h>
10#include <kvm/arm_vgic.h>
11#include <asm/kvm_mmu.h>
12#include "vgic.h"
13
ad275b8b
EA
14/*
15 * Initialization rules: there are multiple stages to the vgic
966e0149
CD
16 * initialization, both for the distributor and the CPU interfaces. The basic
17 * idea is that even though the VGIC is not functional or not requested from
18 * user space, the critical path of the run loop can still call VGIC functions
19 * that just won't do anything, without them having to check additional
20 * initialization flags to ensure they don't look at uninitialized data
21 * structures.
ad275b8b
EA
22 *
23 * Distributor:
24 *
25 * - kvm_vgic_early_init(): initialization of static data that doesn't
26 * depend on any sizing information or emulation type. No allocation
27 * is allowed there.
28 *
29 * - vgic_init(): allocation and initialization of the generic data
30 * structures that depend on sizing information (number of CPUs,
31 * number of interrupts). Also initializes the vcpu specific data
32 * structures. Can be executed lazily for GICv2.
33 *
34 * CPU Interface:
35 *
5ec17fba 36 * - kvm_vgic_vcpu_init(): initialization of static data that
ad275b8b
EA
37 * doesn't depend on any sizing information or emulation type. No
38 * allocation is allowed there.
39 */
40
/* EARLY INIT */

/**
 * kvm_vgic_early_init() - Initialize static VGIC VCPU data structures
 * @kvm: The VM whose VGIC distributor should be initialized
 *
 * Only do initialization of static structures that don't require any
 * allocation or sizing information from userspace. vgic_init() calls
 * kvm_vgic_dist_init(), which takes care of the rest.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	/* Only the LPI bookkeeping needs setting up this early. */
	INIT_LIST_HEAD(&dist->lpi_list_head);
	raw_spin_lock_init(&dist->lpi_list_lock);
}
58
/* CREATION */

/**
 * kvm_vgic_create: triggered by the instantiation of the VGIC device by
 * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
 * or through the generic KVM_CREATE_DEVICE API ioctl.
 * irqchip_in_kernel() tells you if this function succeeded or not.
 * @kvm: kvm struct pointer
 * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	/* Only one in-kernel irqchip may exist per VM. */
	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		!kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		/* Remember how far we got so the exit path can unwind. */
		vcpu_lock_idx = i;
	}

	/* Creation must happen before any vcpu has ever entered the guest. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;

	/* The chosen model caps the vcpu count; reject oversized VMs. */
	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;

	/* MMIO base addresses are provided later via the KVM device API. */
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	else
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
	/* Drop the vcpu mutexes we managed to take, in reverse order. */
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}
132
ad275b8b
EA
133/* INIT/DESTROY */
134
135/**
136 * kvm_vgic_dist_init: initialize the dist data structures
137 * @kvm: kvm struct pointer
138 * @nr_spis: number of spis, frozen by caller
139 */
140static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
141{
142 struct vgic_dist *dist = &kvm->arch.vgic;
143 struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
144 int i;
145
146 dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
147 if (!dist->spis)
148 return -ENOMEM;
149
150 /*
151 * In the following code we do not take the irq struct lock since
152 * no other action on irq structs can happen while the VGIC is
153 * not initialized yet:
154 * If someone wants to inject an interrupt or does a MMIO access, we
155 * require prior initialization in case of a virtual GICv3 or trigger
156 * initialization when using a virtual GICv2.
157 */
158 for (i = 0; i < nr_spis; i++) {
159 struct vgic_irq *irq = &dist->spis[i];
160
161 irq->intid = i + VGIC_NR_PRIVATE_IRQS;
162 INIT_LIST_HEAD(&irq->ap_list);
8fa3adb8 163 raw_spin_lock_init(&irq->irq_lock);
ad275b8b
EA
164 irq->vcpu = NULL;
165 irq->target_vcpu = vcpu0;
5dd4b924 166 kref_init(&irq->refcount);
8df3c8f3 167 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
ad275b8b 168 irq->targets = 0;
8df3c8f3
CD
169 irq->group = 0;
170 } else {
ad275b8b 171 irq->mpidr = 0;
8df3c8f3
CD
172 irq->group = 1;
173 }
ad275b8b
EA
174 }
175 return 0;
176}
177
/**
 * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
 * structures and register VCPU-specific KVM iodevs
 *
 * @vcpu: pointer to the VCPU being created and initialized
 *
 * Only do initialization, but do not actually enable the
 * VGIC CPU interface
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;
	int i;

	/* Redistributor addresses are provided later via the device API. */
	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);

	/*
	 * Enable and configure all SGIs to be edge-triggered and
	 * configure all PPIs as level-triggered.
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		/* Private interrupts always target their own vcpu. */
		irq->target_vcpu = vcpu;
		irq->targets = 1U << vcpu->vcpu_id;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs */
			irq->config = VGIC_CONFIG_LEVEL;
		}

		/* Private interrupts live in group 1 on GICv3, group 0 on GICv2. */
		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
			irq->group = 1;
		else
			irq->group = 0;
	}

	/* Nothing more to do without an in-kernel irqchip. */
	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	/*
	 * If we are creating a VCPU with a GICv3 we must also register the
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		/* Registration is serialized under the VM-wide lock. */
		mutex_lock(&vcpu->kvm->lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->lock);
	}
	return ret;
}
243
443c3a9e 244static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
ad275b8b 245{
ad275b8b
EA
246 if (kvm_vgic_global_state.type == VGIC_V2)
247 vgic_v2_enable(vcpu);
248 else
249 vgic_v3_enable(vcpu);
250}
251
/*
 * vgic_init: allocates and initializes dist and vcpu data structures
 * depending on two dimensioning parameters:
 * - the number of spis
 * - the number of vcpus
 * The function is generally called when nr_spis has been explicitly set
 * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
 * vgic_initialized() returns true when this function has succeeded.
 * Must be called with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0, i, idx;

	/* Idempotent: a second call is a successful no-op. */
	if (vgic_initialized(kvm))
		return 0;

	/* Are we also in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* freeze the number of spis */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/* Initialize groups on CPUs created before the VGIC type was known */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
			/* Same group assignment as kvm_vgic_vcpu_init(). */
			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
				irq->group = 1;
			else
				irq->group = 0;
		}
	}

	/* The GICv4 layer is only needed when an ITS is present. */
	if (vgic_has_its(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);

	/* NOTE(review): presumably the IIDR revision reported to the guest — confirm */
	dist->implementation_rev = 2;
	/* Publish success last, once everything above has completed. */
	dist->initialized = true;

out:
	return ret;
}
317
/* Tear down the distributor state set up by kvm_vgic_dist_init()/vgic_init(). */
static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg, *next;

	/* Mark the vgic unusable before freeing anything. */
	dist->ready = false;
	dist->initialized = false;

	kfree(dist->spis);
	dist->spis = NULL;	/* guard against a second destroy */
	dist->nr_spis = 0;

	/* GICv3 keeps a list of redistributor regions; free them all. */
	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
			list_del(&rdreg->list);
			kfree(rdreg);
		}
		INIT_LIST_HEAD(&dist->rd_regions);
	}

	if (vgic_supports_direct_msis(kvm))
		vgic_v4_teardown(kvm);
}
341
342void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
343{
344 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
345
346 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
347}
348
1193e6ae
MZ
349/* To be called with kvm->lock held */
350static void __kvm_vgic_destroy(struct kvm *kvm)
ad275b8b
EA
351{
352 struct kvm_vcpu *vcpu;
353 int i;
354
10f92c4c
CD
355 vgic_debug_destroy(kvm);
356
ad275b8b
EA
357 kvm_vgic_dist_destroy(kvm);
358
359 kvm_for_each_vcpu(i, vcpu, kvm)
360 kvm_vgic_vcpu_destroy(vcpu);
361}
362
/* Public entry point: takes kvm->lock around the actual teardown. */
void kvm_vgic_destroy(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	__kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
}
369
ad275b8b
EA
370/**
371 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
372 * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
373 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
374 * @kvm: kvm struct pointer
375 */
376int vgic_lazy_init(struct kvm *kvm)
377{
378 int ret = 0;
379
380 if (unlikely(!vgic_initialized(kvm))) {
381 /*
382 * We only provide the automatic initialization of the VGIC
383 * for the legacy case of a GICv2. Any other type must
384 * be explicitly initialized once setup with the respective
385 * KVM device call.
386 */
387 if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
388 return -EBUSY;
389
390 mutex_lock(&kvm->lock);
391 ret = vgic_init(kvm);
392 mutex_unlock(&kvm->lock);
393 }
394
395 return ret;
396}
397
/* RESOURCE MAPPING */

/**
 * Map the MMIO regions depending on the VGIC model exposed to the guest
 * called on the first VCPU run.
 * Also map the virtual CPU interface into the VM.
 * v2/v3 derivatives call vgic_init if not already done.
 * vgic_ready() returns true if this function has succeeded.
 * @kvm: kvm struct pointer
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	mutex_lock(&kvm->lock);
	/* Nothing to map when the irqchip is emulated in userspace. */
	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
		ret = vgic_v2_map_resources(kvm);
	else
		ret = vgic_v3_map_resources(kvm);

	/* On failure tear everything down; kvm->lock is already held. */
	if (ret)
		__kvm_vgic_destroy(kvm);

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
429
/* GENERIC PROBE */

/* CPU hotplug "starting" callback: unmask the maintenance IRQ on this CPU. */
static int vgic_init_cpu_starting(unsigned int cpu)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
	return 0;
}
437

/* CPU hotplug "dying" callback: mask the maintenance IRQ on this CPU. */
static int vgic_init_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
	return 0;
}
444
/* Maintenance IRQ handler; @irq and @data are unused by design. */
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_fold_lr_state).
	 */
	return IRQ_HANDLED;
}
455
/**
 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
 *
 * For a specific CPU, initialize the GIC VE hardware.
 */
void kvm_vgic_init_cpu_hardware(void)
{
	/* Must run pinned to the CPU being initialized. */
	BUG_ON(preemptible());

	/*
	 * We want to make sure the list registers start out clear so that we
	 * only have to program the used registers.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_init_lrs();
	else
		kvm_call_hyp(__vgic_v3_init_lrs);
}
474
90977732
EA
475/**
476 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
477 * according to the host GIC model. Accordingly calls either
478 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
479 * instantiated by a guest later on .
480 */
481int kvm_vgic_hyp_init(void)
482{
483 const struct gic_kvm_info *gic_kvm_info;
484 int ret;
485
486 gic_kvm_info = gic_get_kvm_info();
487 if (!gic_kvm_info)
488 return -ENODEV;
489
490 if (!gic_kvm_info->maint_irq) {
491 kvm_err("No vgic maintenance irq\n");
492 return -ENXIO;
493 }
494
495 switch (gic_kvm_info->type) {
496 case GIC_V2:
497 ret = vgic_v2_probe(gic_kvm_info);
498 break;
499 case GIC_V3:
500 ret = vgic_v3_probe(gic_kvm_info);
5a7a8426
VM
501 if (!ret) {
502 static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
503 kvm_info("GIC system register CPU interface enabled\n");
504 }
90977732
EA
505 break;
506 default:
507 ret = -ENODEV;
508 };
509
510 if (ret)
511 return ret;
512
513 kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
514 ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
515 vgic_maintenance_handler,
516 "vgic", kvm_get_running_vcpus());
517 if (ret) {
518 kvm_err("Cannot register interrupt %d\n",
519 kvm_vgic_global_state.maint_irq);
520 return ret;
521 }
522
15d7e3d3 523 ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
73c1b41e 524 "kvm/arm/vgic:starting",
15d7e3d3 525 vgic_init_cpu_starting, vgic_init_cpu_dying);
90977732
EA
526 if (ret) {
527 kvm_err("Cannot register vgic CPU notifier\n");
528 goto out_free_irq;
529 }
530
90977732
EA
531 kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
532 return 0;
533
534out_free_irq:
535 free_percpu_irq(kvm_vgic_global_state.maint_irq,
536 kvm_get_running_vcpus());
537 return ret;
538}