git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - queue-4.19/kvm-arm-arm64-vgic-make-vgic_dist-lpi_list_lock-a-ra.patch
Linux 4.14.108
[thirdparty/kernel/stable-queue.git] / queue-4.19 / kvm-arm-arm64-vgic-make-vgic_dist-lpi_list_lock-a-ra.patch
1 From d00106b1e4dffd77854dc185313dcda92052cba4 Mon Sep 17 00:00:00 2001
2 From: Julien Thierry <julien.thierry@arm.com>
3 Date: Mon, 7 Jan 2019 15:06:16 +0000
4 Subject: KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock
5
6 [ Upstream commit fc3bc475231e12e9c0142f60100cf84d077c79e1 ]
7
8 vgic_dist->lpi_list_lock must always be taken with interrupts disabled as
9 it is used in interrupt context.
10
11 For configurations such as PREEMPT_RT_FULL, this means that it should
12 be a raw_spinlock since RT spinlocks are interruptible.
13
14 Signed-off-by: Julien Thierry <julien.thierry@arm.com>
15 Acked-by: Christoffer Dall <christoffer.dall@arm.com>
16 Acked-by: Marc Zyngier <marc.zyngier@arm.com>
17 Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
18 Signed-off-by: Sasha Levin <sashal@kernel.org>
19 ---
20 include/kvm/arm_vgic.h | 2 +-
21 virt/kvm/arm/vgic/vgic-init.c | 2 +-
22 virt/kvm/arm/vgic/vgic-its.c | 8 ++++----
23 virt/kvm/arm/vgic/vgic.c | 10 +++++-----
24 4 files changed, 11 insertions(+), 11 deletions(-)
25
26 diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
27 index 4f31f96bbfab..90ac450745f1 100644
28 --- a/include/kvm/arm_vgic.h
29 +++ b/include/kvm/arm_vgic.h
30 @@ -256,7 +256,7 @@ struct vgic_dist {
31 u64 propbaser;
32
33 /* Protects the lpi_list and the count value below. */
34 - spinlock_t lpi_list_lock;
35 + raw_spinlock_t lpi_list_lock;
36 struct list_head lpi_list_head;
37 int lpi_list_count;
38
39 diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
40 index c0c0b88af1d5..33e7ee814f7b 100644
41 --- a/virt/kvm/arm/vgic/vgic-init.c
42 +++ b/virt/kvm/arm/vgic/vgic-init.c
43 @@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
44 struct vgic_dist *dist = &kvm->arch.vgic;
45
46 INIT_LIST_HEAD(&dist->lpi_list_head);
47 - spin_lock_init(&dist->lpi_list_lock);
48 + raw_spin_lock_init(&dist->lpi_list_lock);
49 }
50
51 /* CREATION */
52 diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
53 index 12502251727e..f376c82afb61 100644
54 --- a/virt/kvm/arm/vgic/vgic-its.c
55 +++ b/virt/kvm/arm/vgic/vgic-its.c
56 @@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
57 irq->target_vcpu = vcpu;
58 irq->group = 1;
59
60 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
61 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
62
63 /*
64 * There could be a race with another vgic_add_lpi(), so we need to
65 @@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
66 dist->lpi_list_count++;
67
68 out_unlock:
69 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
70 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
71
72 /*
73 * We "cache" the configuration table entries in our struct vgic_irq's.
74 @@ -339,7 +339,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
75 if (!intids)
76 return -ENOMEM;
77
78 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
79 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
80 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
81 if (i == irq_count)
82 break;
83 @@ -348,7 +348,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
84 continue;
85 intids[i++] = irq->intid;
86 }
87 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
88 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
89
90 *intid_ptr = intids;
91 return i;
92 diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
93 index f884a54b2601..c5165e3b80cb 100644
94 --- a/virt/kvm/arm/vgic/vgic.c
95 +++ b/virt/kvm/arm/vgic/vgic.c
96 @@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
97 struct vgic_irq *irq = NULL;
98 unsigned long flags;
99
100 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
101 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
102
103 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
104 if (irq->intid != intid)
105 @@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
106 irq = NULL;
107
108 out_unlock:
109 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
110 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
111
112 return irq;
113 }
114 @@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
115 if (irq->intid < VGIC_MIN_LPI)
116 return;
117
118 - spin_lock_irqsave(&dist->lpi_list_lock, flags);
119 + raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
120 if (!kref_put(&irq->refcount, vgic_irq_release)) {
121 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
122 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
123 return;
124 };
125
126 list_del(&irq->lpi_list);
127 dist->lpi_list_count--;
128 - spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
129 + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
130
131 kfree(irq);
132 }
133 --
134 2.19.1
135