From ae705930fca6322600690df9dc1c7d0516145a93 Mon Sep 17 00:00:00 2001
From: Christoffer Dall <christoffer.dall@linaro.org>
Date: Fri, 13 Mar 2015 17:02:56 +0000
Subject: arm/arm64: KVM: Keep elrsr/aisr in sync with software model
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From: Christoffer Dall <christoffer.dall@linaro.org>

commit ae705930fca6322600690df9dc1c7d0516145a93 upstream.

There is an interesting bug in the vgic code, which manifests itself
when the KVM run loop has a signal pending or needs a vmid generation
rollover after having disabled interrupts but before actually switching
to the guest.

In this case, we flush the vgic as usual, but we sync back the vgic
state and exit to userspace before entering the guest. The consequence
is that we will be syncing the list registers back to the software model
using the GICH_ELRSR and GICH_EISR from the last execution of the guest,
potentially overwriting a list register containing an interrupt.

This showed up during migration testing where we would capture a state
where the VM has masked the arch timer but there were no interrupts,
resulting in a hung test.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Reported-by: Alex Bennee <alex.bennee@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/kvm/arm_vgic.h |    1 +
 virt/kvm/arm/vgic-v2.c |    8 ++++++++
 virt/kvm/arm/vgic-v3.c |    8 ++++++++
 virt/kvm/arm/vgic.c    |   16 ++++++++++++++++
 4 files changed, 33 insertions(+)

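[ Editorial note, not part of the upstream change: the stand-alone C
  model below only illustrates the flush/sync ordering described in the
  changelog above. All names in it (struct model, sw_lr, hw_elrsr,
  flush(), sync()) are simplified stand-ins for the real vgic
  structures and helpers, not kernel API. It sits between the diffstat
  and the first hunk, where patch(1) and git-am ignore it. ]

#include <stdint.h>
#include <stdio.h>

#define NR_LR            4
#define LR_STATE_PENDING 1

struct model {
	int      sw_lr[NR_LR]; /* software model of the list registers */
	uint64_t hw_elrsr;     /* "empty LR" bits, one per list register */
};

/* Flush: software model -> list register, on the way into the guest. */
static void flush(struct model *m, int lr, int state)
{
	m->sw_lr[lr] = state;
	/* The fix: update elrsr whenever an LR is written, instead of
	 * trusting the stale value captured at the previous guest exit. */
	if (state & LR_STATE_PENDING)
		m->hw_elrsr &= ~(1ULL << lr);
	else
		m->hw_elrsr |= (1ULL << lr);
}

/* Sync: list registers -> software model. Runs after a guest exit, but
 * also when entry is aborted by a pending signal or a vmid rollover. */
static void sync(struct model *m)
{
	int lr;

	for (lr = 0; lr < NR_LR; lr++)
		if (m->hw_elrsr & (1ULL << lr))
			m->sw_lr[lr] = 0; /* reported empty: discard it */
}

int main(void)
{
	struct model m = { .hw_elrsr = ~0ULL }; /* all LRs start empty */

	flush(&m, 0, LR_STATE_PENDING); /* queue an interrupt in LR0 */
	sync(&m);                       /* aborted entry: sync w/o running */

	/* Without the elrsr update in flush(), the stale "empty" bit for
	 * LR0 would wipe out the pending interrupt here. */
	printf("LR0 pending after early sync: %s\n",
	       m.sw_lr[0] & LR_STATE_PENDING ? "yes (fixed)" : "no (bug)");
	return 0;
}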
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -113,6 +113,7 @@ struct vgic_ops {
 	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
 	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
 	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
+	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
 	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
 	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
 	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct
 	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
+static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
+}
+
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops
 	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
 	.get_elrsr		= vgic_v2_get_elrsr,
 	.get_eisr		= vgic_v2_get_eisr,
+	.clear_eisr		= vgic_v2_clear_eisr,
 	.get_interrupt_status	= vgic_v2_get_interrupt_status,
 	.enable_underflow	= vgic_v2_enable_underflow,
 	.disable_underflow	= vgic_v2_disable_underflow,
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -86,6 +86,8 @@ static void vgic_v3_sync_lr_elrsr(struct
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
 }
 
 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -98,6 +100,11 @@ static u64 vgic_v3_get_eisr(const struct
 	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
 }
 
+static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
+}
+
 static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
 	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
@@ -162,6 +169,7 @@ static const struct vgic_ops vgic_v3_ops
 	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
 	.get_elrsr		= vgic_v3_get_elrsr,
 	.get_eisr		= vgic_v3_get_eisr,
+	.clear_eisr		= vgic_v3_clear_eisr,
 	.get_interrupt_status	= vgic_v3_get_interrupt_status,
 	.enable_underflow	= vgic_v3_enable_underflow,
 	.disable_underflow	= vgic_v3_disable_underflow,
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1219,6 +1219,11 @@ static inline u64 vgic_get_eisr(struct k
 	return vgic_ops->get_eisr(vcpu);
 }
 
+static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->clear_eisr(vcpu);
+}
+
 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
 {
 	return vgic_ops->get_interrupt_status(vcpu);
@@ -1258,6 +1263,7 @@ static void vgic_retire_lr(int lr_nr, in
 	vgic_set_lr(vcpu, lr_nr, vlr);
 	clear_bit(lr_nr, vgic_cpu->lr_used);
 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1313,6 +1319,7 @@ static bool vgic_queue_irq(struct kvm_vc
 			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 			vlr.state |= LR_STATE_PENDING;
 			vgic_set_lr(vcpu, lr, vlr);
+			vgic_sync_lr_elrsr(vcpu, lr, vlr);
 			return true;
 		}
 	}
@@ -1334,6 +1341,7 @@ static bool vgic_queue_irq(struct kvm_vc
 		vlr.state |= LR_EOI_INT;
 
 	vgic_set_lr(vcpu, lr, vlr);
+	vgic_sync_lr_elrsr(vcpu, lr, vlr);
 
 	return true;
 }
@@ -1502,6 +1510,14 @@ static bool vgic_process_maintenance(str
 	if (status & INT_STATUS_UNDERFLOW)
 		vgic_disable_underflow(vcpu);
 
+	/*
+	 * In the next iterations of the vcpu loop, if we sync the vgic state
+	 * after flushing it, but before entering the guest (this happens for
+	 * pending signals and vmid rollovers), then make sure we don't pick
+	 * up any old maintenance interrupts here.
+	 */
+	vgic_clear_eisr(vcpu);
+
 	return level_pending;
 }
 