]>
Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
aa024c2f MZ |
2 | /* |
3 | * Copyright (C) 2012 - ARM Ltd | |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
aa024c2f MZ |
5 | */ |
6 | ||
09e6be12 | 7 | #include <linux/arm-smccc.h> |
cf5d3188 | 8 | #include <linux/preempt.h> |
aa024c2f | 9 | #include <linux/kvm_host.h> |
85bd0ba1 | 10 | #include <linux/uaccess.h> |
aa024c2f MZ |
11 | #include <linux/wait.h> |
12 | ||
79c64880 | 13 | #include <asm/cputype.h> |
aa024c2f | 14 | #include <asm/kvm_emulate.h> |
aa024c2f | 15 | |
1a2fb94e | 16 | #include <kvm/arm_psci.h> |
55009c6e | 17 | #include <kvm/arm_hypercalls.h> |
1a2fb94e | 18 | |
aa024c2f MZ |
19 | /* |
20 | * This is an implementation of the Power State Coordination Interface | |
21 | * as described in ARM document number ARM DEN 0022A. | |
22 | */ | |
23 | ||
e6bc13c8 AP |
24 | #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) |
25 | ||
26 | static unsigned long psci_affinity_mask(unsigned long affinity_level) | |
27 | { | |
28 | if (affinity_level <= 3) | |
29 | return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level); | |
30 | ||
31 | return 0; | |
32 | } | |
33 | ||
/*
 * Emulate PSCI CPU_SUSPEND for the calling VCPU.
 *
 * Returns PSCI_RET_SUCCESS once the VCPU has been woken up again.
 */
static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we make VCPU suspend emulation the
	 * same as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means for KVM the wakeup events are interrupts and
	 * this is consistent with the intended use of StateID as
	 * described in section 5.4.1 of the PSCI v0.2 specification
	 * (ARM DEN 0022A).
	 *
	 * Further, we also treat a power-down request the same as a
	 * stand-by request, as per section 5.4.2 clause 3 of the PSCI
	 * v0.2 specification (ARM DEN 0022A). This means all suspend
	 * states for KVM will preserve the register state.
	 */
	kvm_vcpu_block(vcpu);
	/* Drop the UNHALT request set by the wakeup so we don't loop. */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}
54 | ||
/*
 * Power off the calling VCPU: mark it off, then ask it to sleep and
 * kick it so the request is noticed promptly.
 *
 * NOTE(review): power_off must be set before the SLEEP request is
 * made so the VCPU thread observes the flag when it handles the
 * request.
 */
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}
61 | ||
/*
 * Emulate PSCI CPU_ON: power up the VCPU whose MPIDR is given in arg1,
 * starting it at the entry point in arg2 with arg3 in r0/x0.
 *
 * Callers serialize this against concurrent CPU_ON calls (the PSCI
 * dispatchers take kvm->lock around it).
 */
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	unsigned long cpu_id;

	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
	/* A 32-bit caller can only provide a 32-bit target MPIDR. */
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		/* PSCI v0.1 predates the ALREADY_ON error code. */
		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	reset_state = &vcpu->arch.reset_state;

	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate caller endianness */
	reset_state->be = kvm_vcpu_is_be(source_vcpu);

	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

	WRITE_ONCE(reset_state->reset, true);
	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

	/*
	 * Make sure the reset request is observed if the change to
	 * power_state is observed.
	 */
	smp_wmb();

	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);

	return PSCI_RET_SUCCESS;
}
115 | ||
e6bc13c8 AP |
116 | static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) |
117 | { | |
0c067292 | 118 | int i, matching_cpus = 0; |
e6bc13c8 AP |
119 | unsigned long mpidr; |
120 | unsigned long target_affinity; | |
121 | unsigned long target_affinity_mask; | |
122 | unsigned long lowest_affinity_level; | |
123 | struct kvm *kvm = vcpu->kvm; | |
124 | struct kvm_vcpu *tmp; | |
125 | ||
84684fec MZ |
126 | target_affinity = smccc_get_arg1(vcpu); |
127 | lowest_affinity_level = smccc_get_arg2(vcpu); | |
e6bc13c8 AP |
128 | |
129 | /* Determine target affinity mask */ | |
130 | target_affinity_mask = psci_affinity_mask(lowest_affinity_level); | |
131 | if (!target_affinity_mask) | |
132 | return PSCI_RET_INVALID_PARAMS; | |
133 | ||
134 | /* Ignore other bits of target affinity */ | |
135 | target_affinity &= target_affinity_mask; | |
136 | ||
137 | /* | |
138 | * If one or more VCPU matching target affinity are running | |
139 | * then ON else OFF | |
140 | */ | |
141 | kvm_for_each_vcpu(i, tmp, kvm) { | |
4429fc64 | 142 | mpidr = kvm_vcpu_get_mpidr_aff(tmp); |
0c067292 AS |
143 | if ((mpidr & target_affinity_mask) == target_affinity) { |
144 | matching_cpus++; | |
3781528e | 145 | if (!tmp->arch.power_off) |
0c067292 | 146 | return PSCI_0_2_AFFINITY_LEVEL_ON; |
e6bc13c8 AP |
147 | } |
148 | } | |
149 | ||
0c067292 AS |
150 | if (!matching_cpus) |
151 | return PSCI_RET_INVALID_PARAMS; | |
152 | ||
e6bc13c8 AP |
153 | return PSCI_0_2_AFFINITY_LEVEL_OFF; |
154 | } | |
155 | ||
/*
 * Park every VCPU and request an exit to userspace with the given
 * KVM_SYSTEM_EVENT type (shutdown or reset).
 */
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made. Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not run
	 * after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
178 | ||
/* PSCI SYSTEM_OFF: signal a shutdown system event to userspace. */
static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}
183 | ||
/* PSCI SYSTEM_RESET: signal a reset system event to userspace. */
static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
188 | ||
2890ac99 MZ |
189 | static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu) |
190 | { | |
191 | int i; | |
192 | ||
193 | /* | |
194 | * Zero the input registers' upper 32 bits. They will be fully | |
195 | * zeroed on exit, so we're fine changing them in place. | |
196 | */ | |
197 | for (i = 1; i < 4; i++) | |
198 | vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i))); | |
199 | } | |
200 | ||
fdc9999e MZ |
201 | static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn) |
202 | { | |
203 | switch(fn) { | |
204 | case PSCI_0_2_FN64_CPU_SUSPEND: | |
205 | case PSCI_0_2_FN64_CPU_ON: | |
206 | case PSCI_0_2_FN64_AFFINITY_INFO: | |
207 | /* Disallow these functions for 32bit guests */ | |
208 | if (vcpu_mode_is_32bit(vcpu)) | |
209 | return PSCI_RET_NOT_SUPPORTED; | |
210 | break; | |
211 | } | |
212 | ||
213 | return 0; | |
214 | } | |
215 | ||
/*
 * Handle a PSCI v0.2 call from the guest.
 *
 * Returns 1 to resume the guest, or 0 when userspace must handle a
 * system event (SYSTEM_OFF / SYSTEM_RESET). The PSCI result is placed
 * in the guest's r0/x0 via smccc_set_retval().
 */
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	/* Filter out functions forbidden for this guest (e.g. SMC64 on AArch32). */
	val = kvm_psci_check_allowed_function(vcpu, psci_fn);
	if (val)
		goto out;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
		/* SMC32 variant: ignore the upper halves of the arguments. */
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_CPU_ON:
		/* Serialize against concurrent CPU_ON / vcpu reset. */
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
		kvm_psci_narrow_to_32bit(vcpu);
		fallthrough;
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Trusted OS is MP hence does not require migration
		 * or
		 * Trusted OS is not present
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally/deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request then the guest
		 * VCPU should see an internal failure from the PSCI
		 * return value. To achieve this, we preload r0 (or x0)
		 * with PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

out:
	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
298 | ||
/*
 * Handle a PSCI v1.0 call from the guest. v1.0 adds PSCI_FEATURES on
 * top of v0.2; anything else is delegated to the v0.2 handler.
 *
 * Returns 1 to resume the guest, 0 to exit to userspace.
 */
static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	u32 feature;
	unsigned long val;
	int ret = 1;

	switch(psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = KVM_ARM_PSCI_1_0;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		feature = smccc_get_arg1(vcpu);
		/* A function we'd refuse to run must not be advertised. */
		val = kvm_psci_check_allowed_function(vcpu, feature);
		if (val)
			break;

		switch(feature) {
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			/* 0 == supported, no feature flags. */
			val = 0;
			break;
		default:
			val = PSCI_RET_NOT_SUPPORTED;
			break;
		}
		break;
	default:
		/* Everything else behaves exactly as in v0.2. */
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
344 | ||
e8e7fcc5 | 345 | static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) |
aa024c2f | 346 | { |
6c7a5dce | 347 | struct kvm *kvm = vcpu->kvm; |
84684fec | 348 | u32 psci_fn = smccc_get_function(vcpu); |
aa024c2f MZ |
349 | unsigned long val; |
350 | ||
351 | switch (psci_fn) { | |
352 | case KVM_PSCI_FN_CPU_OFF: | |
353 | kvm_psci_vcpu_off(vcpu); | |
7d0f84aa | 354 | val = PSCI_RET_SUCCESS; |
aa024c2f MZ |
355 | break; |
356 | case KVM_PSCI_FN_CPU_ON: | |
6c7a5dce | 357 | mutex_lock(&kvm->lock); |
aa024c2f | 358 | val = kvm_psci_vcpu_on(vcpu); |
6c7a5dce | 359 | mutex_unlock(&kvm->lock); |
aa024c2f | 360 | break; |
e2d99736 | 361 | default: |
7d0f84aa | 362 | val = PSCI_RET_NOT_SUPPORTED; |
aa024c2f | 363 | break; |
aa024c2f MZ |
364 | } |
365 | ||
84684fec | 366 | smccc_set_retval(vcpu, val, 0, 0, 0); |
e8e7fcc5 | 367 | return 1; |
aa024c2f | 368 | } |
7d0f84aa AP |
369 | |
370 | /** | |
371 | * kvm_psci_call - handle PSCI call if r0 value is in range | |
372 | * @vcpu: Pointer to the VCPU struct | |
373 | * | |
374 | * Handle PSCI calls from guests through traps from HVC instructions. | |
e8e7fcc5 AP |
375 | * The calling convention is similar to SMC calls to the secure world |
376 | * where the function number is placed in r0. | |
377 | * | |
378 | * This function returns: > 0 (success), 0 (success but exit to user | |
379 | * space), and < 0 (errors) | |
380 | * | |
381 | * Errors: | |
382 | * -EINVAL: Unrecognized PSCI function | |
7d0f84aa | 383 | */ |
55009c6e | 384 | int kvm_psci_call(struct kvm_vcpu *vcpu) |
7d0f84aa | 385 | { |
a4097b35 | 386 | switch (kvm_psci_version(vcpu, vcpu->kvm)) { |
58e0b223 MZ |
387 | case KVM_ARM_PSCI_1_0: |
388 | return kvm_psci_1_0_call(vcpu); | |
7d0f84aa AP |
389 | case KVM_ARM_PSCI_0_2: |
390 | return kvm_psci_0_2_call(vcpu); | |
391 | case KVM_ARM_PSCI_0_1: | |
392 | return kvm_psci_0_1_call(vcpu); | |
393 | default: | |
e8e7fcc5 | 394 | return -EINVAL; |
7d0f84aa AP |
395 | }; |
396 | } | |
09e6be12 | 397 | |
/*
 * Number of firmware pseudo-registers exposed to userspace:
 * the PSCI version register plus the two SMCCC workaround registers.
 */
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return 3;
}
402 | ||
403 | int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | |
404 | { | |
99adb567 AP |
405 | if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++)) |
406 | return -EFAULT; | |
407 | ||
408 | if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++)) | |
409 | return -EFAULT; | |
410 | ||
411 | if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++)) | |
85bd0ba1 MZ |
412 | return -EFAULT; |
413 | ||
414 | return 0; | |
415 | } | |
416 | ||
/* Width/mask of the "level" field in the workaround pseudo-registers. */
#define KVM_REG_FEATURE_LEVEL_WIDTH	4
#define KVM_REG_FEATURE_LEVEL_MASK	(BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (kvm_arm_harden_branch_predictor()) {
		case KVM_BP_HARDEN_UNKNOWN:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case KVM_BP_HARDEN_WA_NEEDED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case KVM_BP_HARDEN_NOT_REQUIRED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		/* Unreachable unless the enum grows; stay conservative. */
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (kvm_arm_have_ssbd()) {
		case KVM_SSBD_FORCE_DISABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		case KVM_SSBD_KERNEL:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
		case KVM_SSBD_FORCE_ENABLE:
		case KVM_SSBD_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case KVM_SSBD_UNKNOWN:
		default:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
		}
	}

	/* Not a workaround pseudo-register. */
	return -EINVAL;
}
454 | ||
85bd0ba1 MZ |
455 | int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
456 | { | |
99adb567 AP |
457 | void __user *uaddr = (void __user *)(long)reg->addr; |
458 | u64 val; | |
85bd0ba1 | 459 | |
99adb567 AP |
460 | switch (reg->id) { |
461 | case KVM_REG_ARM_PSCI_VERSION: | |
85bd0ba1 | 462 | val = kvm_psci_version(vcpu, vcpu->kvm); |
99adb567 AP |
463 | break; |
464 | case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: | |
465 | val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; | |
466 | break; | |
467 | case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: | |
468 | val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; | |
85bd0ba1 | 469 | |
99adb567 AP |
470 | if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL && |
471 | kvm_arm_get_vcpu_workaround_2_flag(vcpu)) | |
472 | val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED; | |
473 | break; | |
474 | default: | |
475 | return -ENOENT; | |
85bd0ba1 MZ |
476 | } |
477 | ||
99adb567 AP |
478 | if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id))) |
479 | return -EFAULT; | |
480 | ||
481 | return 0; | |
85bd0ba1 MZ |
482 | } |
483 | ||
/*
 * Write one firmware pseudo-register on behalf of userspace.
 *
 * The PSCI version may only be set consistently with the VCPU's
 * KVM_ARM_VCPU_PSCI_0_2 feature flag. Workaround registers may never
 * be set to a level better than what the kernel provides.
 *
 * Returns 0 on success, -ENOENT for an unknown id, -EFAULT on a bad
 * user buffer, -EINVAL for an inconsistent value.
 */
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		/* Unknown version: fall through to -EINVAL below. */
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		/* Userspace must not claim a better level than the kernel's. */
		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;

		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
		    wa_level != val)
			return -EINVAL;

		/* Are we finished or do we need to check the enable bit ? */
		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
			return 0;

		/*
		 * If this kernel supports the workaround to be switched on
		 * or off, make sure it matches the requested setting.
		 */
		switch (wa_level) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
			break;
		}

		return 0;
	default:
		return -ENOENT;
	}

	return -EINVAL;
}