// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))
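
/*
 * Note: VCPU_IRQS_MAX_BUF bounds the buffer used by the
 * KVM_S390_{GET,SET}_IRQ_STATE ioctls; the worst case is roughly one
 * pending SIGP emergency per possible source vcpu plus up to
 * LOCAL_IRQS further local interrupts.
 */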

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
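
/*
 * The binary stats file layout is: header, id string, descriptor array,
 * data block, in that order. The offsets above are derived from the
 * descriptor array itself, so adding a counter only requires a new
 * STATS_DESC_COUNTER() entry.
 */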

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}
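
/*
 * Note: the BUILD_BUG_ONs above guarantee at compile time that the
 * internal facility arrays never exceed the architectural mask/list
 * sizes nor the host's stfle result buffer, so kvm_s390_fac_size()
 * can safely be used as the copy length for all of them.
 */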

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat,
		      KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
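
/*
 * The 64-bit epoch plus its 8-bit extension (epdx) form the extended
 * epoch of the multiple-epoch facility (ECD_MEF): the unsigned
 * comparison against delta above detects a carry out of the low 64
 * bits and propagates it into the extension.
 */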

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}
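
/*
 * Both query helpers follow the same convention: plo_test_bit() sets
 * bit 0x100 in the function code to select "test bit" mode, while
 * __insn32_query() runs the queried instruction with function code 0
 * in register 0 and the address of the result buffer in register 1.
 */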

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}
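
	/*
	 * plo_test_bit() probed each of the 256 possible PLO function
	 * codes above; the resulting mask uses the same inverted
	 * (MSB-first) bit numbering that the instruction reports.
	 */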

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = test_facility(129);
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
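
/*
 * The sync above works one segment (_PAGE_ENTRIES == 256 4k pages, i.e.
 * 1 MB of guest memory) at a time, so that a fatal signal can abort the
 * scan of a large memslot and cond_resched() keeps scheduling latency
 * bounded.
 */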

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (cpu_has_vx()) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}
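
/*
 * After starting migration every page counts as CMMA-dirty, so a first
 * KVM_S390_GET_CMMA_BITS pass transfers all storage attributes;
 * cmma_dirty_pages serves as the remaining-work hint for userspace.
 */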

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u32 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
*kvm
, struct kvm_device_attr
*attr
)
1331 struct kvm_s390_vm_tod_clock gtod
;
1333 memset(>od
, 0, sizeof(gtod
));
1334 kvm_s390_get_tod_clock(kvm
, >od
);
1335 if (copy_to_user((void __user
*)attr
->addr
, >od
, sizeof(gtod
)))
1338 VM_EVENT(kvm
, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1339 gtod
.epoch_idx
, gtod
.tod
);

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u32 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
(						\
	((struct kvm_s390_vm_cpu_uv_feat){	\
		.ap = 1,			\
		.ap_intr = 1,			\
	})					\
	.feat)

static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
	unsigned long data, filter;

	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
	if (get_user(data, &ptr->feat))
		return -EFAULT;
	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	kvm->arch.model.uv_feat_guest.feat = data;
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
		ret = kvm_s390_set_uv_feat(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;

	if (put_user(feat, &dst->feat))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);

	return 0;
}

static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
	unsigned long feat;

	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));

	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
	if (put_user(feat, &dst->feat))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
		break;
	}
	return ret;
}

/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 * @kvm: guest KVM description
 * @val: set or clear the MTCR bit
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * the guest with a topology change.
 * This is only relevant if the topology facility is present.
 *
 * The SCA version, bsca or esca, doesn't matter as offset is the same.
 */
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
	union sca_utility new, old;
	struct bsca_block *sca;

	read_lock(&kvm->arch.sca_lock);
	sca = kvm->arch.sca;
	do {
		old = READ_ONCE(sca->utility);
		new = old;
		new.mtcr = val;
	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}
1908 static int kvm_s390_set_topo_change_indication(struct kvm
*kvm
,
1909 struct kvm_device_attr
*attr
)
1911 if (!test_kvm_facility(kvm
, 11))
1914 kvm_s390_update_topology_change_report(kvm
, !!attr
->attr
);
1918 static int kvm_s390_get_topo_change_indication(struct kvm
*kvm
,
1919 struct kvm_device_attr
*attr
)
1923 if (!test_kvm_facility(kvm
, 11))
1926 read_lock(&kvm
->arch
.sca_lock
);
1927 topo
= ((struct bsca_block
*)kvm
->arch
.sca
)->utility
.mtcr
;
1928 read_unlock(&kvm
->arch
.sca_lock
);
1930 return put_user(topo
, (u8 __user
*)attr
->addr
);
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_set_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_get_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
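/*
 * Illustrative only: a hedged userspace sketch of the ioctl that lands in
 * kvm_s390_get_skeys() above.  A return value of KVM_S390_GET_SKEYS_NONE
 * means the guest never used storage keys; vm_fd is an assumption.
 *
 *	uint8_t keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	int r = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */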
static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);
out:
	kvfree(keys);
	return r;
}
/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
						     gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, true);
}
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
	unsigned long ofs = cur_gfn - ms->base_gfn;
	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		mnode = rb_next(mnode);
		/* If we are above the highest slot, wrap around */
		if (!mnode)
			mnode = rb_first(&slots->gfn_tree);

		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = 0;
	}

	if (cur_gfn < ms->base_gfn)
		ofs = 0;

	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
	}
	return ms->base_gfn + ofs;
}
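/*
 * Worked example (values invented for illustration): with memslots
 * [0x0, 0x100) and [0x200, 0x300) and the only dirty CMMA bit at gfn
 * 0x250, a call with cur_gfn == 0x80 finds no bit in the first slot,
 * advances to the next gfn-tree node and returns 0x250.  A cur_gfn past
 * the last slot wraps around to the first one via rb_first().
 */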
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	if (unlikely(kvm_memslots_empty(slots)))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = kvm_s390_get_gfn_end(slots);

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}
/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
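/*
 * Illustrative only: the retrieval loop a migration tool could run
 * against the ioctl backed by kvm_s390_get_cmma_bits().  Buffer size and
 * error handling are reduced to a sketch; vm_fd is an assumption.
 *
 *	struct kvm_s390_cmma_log log = { .start_gfn = 0, .flags = 0 };
 *	uint8_t buf[4096];
 *
 *	do {
 *		log.count = sizeof(buf);
 *		log.values = (__u64)buf;
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
 *			break;
 *		(consume log.count values starting at log.start_gfn here)
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 */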
/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);
	}
out:
	vfree(bits);
	return r;
}
/**
 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
 * non protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Does not stop in case of error, tries to convert as many
 * CPUs as possible. In case of error, the RC and RRC of the first error are
 * returned.
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	u16 _rc, _rrc;
	int ret = 0;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor has still access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the first failure rc and rrc, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
			*rc = _rc;
			*rrc = _rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
	if (use_gisa)
		kvm_s390_gisa_enable(kvm);
	return ret;
}
/**
 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
 * to protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Tries to undo the conversion in case of error.
 *
 * Return: 0 in case of success, otherwise -EIO
 */
static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	unsigned long i;
	int r = 0;
	u16 dummy;

	struct kvm_vcpu *vcpu;

	/* Disable the GISA if the ultravisor does not support AIV. */
	if (!uv_has_feature(BIT_UV_FEAT_AIV))
		kvm_s390_gisa_disable(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
		mutex_unlock(&vcpu->mutex);
		if (r)
			break;
	}
	if (r)
		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	return r;
}
/*
 * Here we provide user space with a direct interface to query UV
 * related data like UV maxima and available features as well as
 * feature specific data.
 *
 * To facilitate future extension of the data structures we'll try to
 * write data up to the maximum requested length.
 */
static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
{
	ssize_t len_min;

	switch (info->header.id) {
	case KVM_PV_INFO_VM: {
		len_min = sizeof(info->header) + sizeof(info->vm);

		if (info->header.len_max < len_min)
			return -EINVAL;

		memcpy(info->vm.inst_calls_list,
		       uv_info.inst_calls_list,
		       sizeof(uv_info.inst_calls_list));

		/* It's max cpuid not max cpus, so it's off by one */
		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
		info->vm.max_guests = uv_info.max_num_sec_conf;
		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
		info->vm.feature_indication = uv_info.uv_feature_indications;

		return len_min;
	}
	case KVM_PV_INFO_DUMP: {
		len_min = sizeof(info->header) + sizeof(info->dump);

		if (info->header.len_max < len_min)
			return -EINVAL;

		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
		return len_min;
	}
	default:
		return -EINVAL;
	}
}
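/*
 * Illustrative only: how userspace is expected to drive the length
 * handshake implemented above.  len_max announces how much the caller
 * can accept, len_written reports how much was actually filled.
 *
 *	struct kvm_s390_pv_info info = {
 *		.header.id = KVM_PV_INFO_VM,
 *		.header.len_max = sizeof(info.header) + sizeof(info.vm),
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd = KVM_PV_INFO,
 *		.data = (__u64)&info,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) == 0)
 *		printf("max guests %llu\n", info.vm.max_guests);
 */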
static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
			   struct kvm_s390_pv_dmp dmp)
{
	int r = -EINVAL;
	void __user *result_buff = (void __user *)dmp.buff_addr;

	switch (dmp.subcmd) {
	case KVM_PV_DUMP_INIT: {
		if (kvm->arch.pv.dumping)
			break;

		/*
		 * Block SIE entry as concurrent dump UVCs could lead
		 * to validities.
		 */
		kvm_s390_vcpu_block_all(kvm);

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		if (!r)
			kvm->arch.pv.dumping = true;
		else
			kvm_s390_vcpu_unblock_all(kvm);
		break;
	}
	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
		if (!kvm->arch.pv.dumping)
			break;

		/*
		 * gaddr is an output parameter since we might stop
		 * early. As dmp will be copied back in our caller, we
		 * don't need to do it ourselves.
		 */
		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
						&cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_DUMP_COMPLETE: {
		if (!kvm->arch.pv.dumping)
			break;

		r = -EINVAL;
		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
			break;

		r = kvm_s390_pv_dump_complete(kvm, result_buff,
					      &cmd->rc, &cmd->rrc);
		break;
	}
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
	void __user *argp = (void __user *)cmd->data;
	int r = 0;
	u16 dummy;

	if (need_lock)
		mutex_lock(&kvm->lock);

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		/*
		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
		 *  esca, we need no cleanup in the error cases below
		 */
		r = sca_switch_to_extended(kvm);
		if (r)
			break;

		mmap_write_lock(current->mm);
		r = gmap_mark_unmergeable();
		mmap_write_unlock(current->mm);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_ASYNC_CLEANUP_PREPARE:
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	case KVM_PV_ASYNC_CLEANUP_PERFORM:
		r = -EINVAL;
		if (!async_destroy)
			break;
		/* kvm->lock must not be held; this is asserted inside the function. */
		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
		break;
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 8KB */
		r = -EINVAL;
		if (parms.length > PAGE_SIZE * 2)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	case KVM_PV_PREP_RESET: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_UNSHARE_ALL: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_INFO: {
		struct kvm_s390_pv_info info = {};
		ssize_t data_len;

		/*
		 * No need to check the VM protection here.
		 *
		 * Maybe user space wants to query some of the data
		 * when the VM is still unprotected. If we see the
		 * need to fence a new data command we can still
		 * return an error in the info handler.
		 */

		r = -EFAULT;
		if (copy_from_user(&info, argp, sizeof(info.header)))
			break;

		r = -EINVAL;
		if (info.header.len_max < sizeof(info.header))
			break;

		data_len = kvm_s390_handle_pv_info(&info);
		if (data_len < 0) {
			r = data_len;
			break;
		}
		/*
		 * If a data command struct is extended (multiple
		 * times) this can be used to determine how much of it
		 * is valid.
		 */
		info.header.len_written = data_len;

		r = -EFAULT;
		if (copy_to_user(argp, &info, data_len))
			break;

		r = 0;
		break;
	}
	case KVM_PV_DUMP: {
		struct kvm_s390_pv_dmp dmp;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&dmp, argp, sizeof(dmp)))
			break;

		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
		if (r)
			break;

		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
			r = -EFAULT;
			break;
		}

		break;
	}
	default:
		r = -ENOTTY;
	}
	if (need_lock)
		mutex_unlock(&kvm->lock);

	return r;
}
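/*
 * Illustrative only: the minimal command a VMM would issue to make a VM
 * protected, ending up in the KVM_PV_ENABLE arm of kvm_s390_handle_pv()
 * above.  On failure, cmd.rc/cmd.rrc carry the ultravisor diagnostics;
 * vm_fd is an assumption.
 *
 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };
 *
 *	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
 *		fprintf(stderr, "PV enable: rc %x rrc %x\n", cmd.rc, cmd.rrc);
 */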
static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
{
	if (mop->flags & ~supported_flags || !mop->size)
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
		if (mop->key > 0xf)
			return -EINVAL;
	} else {
		mop->key = 0;
	}
	return 0;
}
static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	enum gacc_mode acc_mode;
	void *tmpbuf = NULL;
	int r, srcu_idx;

	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
					KVM_S390_MEMOP_F_CHECK_ONLY);
	if (r)
		return r;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);

	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
		r = PGM_ADDRESSING;
		goto out_unlock;
	}

	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
		r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
		goto out_unlock;
	}
	if (acc_mode == GACC_FETCH) {
		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
					      mop->size, GACC_FETCH, mop->key);
		if (r)
			goto out_unlock;
		if (copy_to_user(uaddr, tmpbuf, mop->size))
			r = -EFAULT;
	} else {
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			goto out_unlock;
		}
		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
					      mop->size, GACC_STORE, mop->key);
	}

out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	vfree(tmpbuf);
	return r;
}
static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void __user *old_addr = (void __user *)mop->old_addr;
	union {
		__uint128_t quad;
		char raw[sizeof(__uint128_t)];
	} old = { .quad = 0}, new = { .quad = 0 };
	unsigned int off_in_quad = sizeof(new) - mop->size;
	int r, srcu_idx;
	bool success;

	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
	if (r)
		return r;
	/*
	 * This validates off_in_quad. Checking that size is a power
	 * of two is not necessary, as cmpxchg_guest_abs_with_key
	 * takes care of that
	 */
	if (mop->size > sizeof(new))
		return -EINVAL;
	if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
		return -EFAULT;
	if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
		return -EFAULT;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
		r = PGM_ADDRESSING;
		goto out_unlock;
	}

	r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
				       new.quad, mop->key, &success);
	if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
		r = -EFAULT;

out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return r;
}
static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
	/*
	 * This is technically a heuristic only, if the kvm->lock is not
	 * taken, it is not guaranteed that the vm is/remains non-protected.
	 * This is ok from a kernel perspective, wrongdoing is detected
	 * on the access, -EFAULT is returned and the vm may crash the
	 * next time it accesses the memory in question.
	 * There is no sane usecase to do switching and a memop on two
	 * different CPUs at the same time.
	 */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	switch (mop->op) {
	case KVM_S390_MEMOP_ABSOLUTE_READ:
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		return kvm_s390_vm_mem_op_abs(kvm, mop);
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
	default:
		return -EINVAL;
	}
}
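/*
 * Illustrative only: a userspace absolute read through the dispatcher
 * above.  The VM must not be protected, see the handle check in
 * kvm_s390_vm_mem_op(); vm_fd and the guest address are assumptions.
 *
 *	char buf[512];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x10000,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.buf   = (__u64)buf,
 *	};
 *	int r = ioctl(vm_fd, KVM_S390_MEM_OP, &op);
 */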
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		/* protvirt means user cpu state */
		kvm_s390_set_user_cpu_state_ctrl(kvm);
		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		/* must be called without kvm->lock */
		r = kvm_s390_handle_pv(kvm, &args);
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vm_mem_op(kvm, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_ZPCI_OP: {
		struct kvm_s390_zpci_op args;

		r = -EINVAL;
		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
			break;
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		r = kvm_s390_pci_zpci_op(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility are installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
/**
 * kvm_arch_crypto_set_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *	 to be set.
 * @apm: the mask identifying the accessible AP adapters
 * @aqm: the mask identifying the accessible AP domains
 * @adm: the mask identifying the accessible AP control domains
 *
 * Set the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *	 function.
 */
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 use 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Can not happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
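/*
 * Illustrative only: granting a guest access to AP adapter 3 and domain 5
 * through the interface above, roughly as the in-tree vfio_ap driver does
 * it.  The masks are inverted bit strings, hence set_bit_inv(); a sketch,
 * not the driver's actual code.
 *
 *	DECLARE_BITMAP(apm, 256) = { 0 };
 *	DECLARE_BITMAP(aqm, 256) = { 0 };
 *	DECLARE_BITMAP(adm, 256) = { 0 };
 *
 *	set_bit_inv(3, apm);
 *	set_bit_inv(5, aqm);
 *	mutex_lock(&kvm->lock);
 *	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
 *	mutex_unlock(&kvm->lock);
 */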
/**
 * kvm_arch_crypto_clear_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *	 to be cleared.
 *
 * Clear the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *	 function.
 */
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);
	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_clear_list(kvm);

	__kvm_arch_free_vm(kvm);
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	mutex_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	mutex_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	if (css_general_characteristics.aiv && test_facility(65))
		set_kvm_facility(kvm->arch.model.fac_mask, 65);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm->arch.model.uv_feat_guest.feat = 0;

	kvm_s390_crypto_init(kvm);

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		mutex_lock(&kvm->lock);
		kvm_s390_pci_init_list(kvm);
		kvm_s390_vcpu_pci_enable_interp(kvm);
		mutex_unlock(&kvm->lock);
	}

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	if (use_gisa)
		kvm_s390_gisa_init(kvm);
	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
	kvm->arch.pv.set_aside = NULL;
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	u16 rc, rrc;

	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);
	kvm_s390_update_topology_change_report(vcpu->kvm, 1);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	/* We can not hold the vcpu mutex here, we are already dying */
	if (kvm_s390_pv_cpu_get_handle(vcpu))
		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
	free_page((unsigned long)(vcpu->arch.sie_block));
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	u16 rc, rrc;

	kvm_destroy_vcpus(kvm);
	sca_dispose(kvm);
	kvm_s390_gisa_destroy(kvm);
	/*
	 * We are already at the end of life and kvm->lock is not taken.
	 * This is ok as the file descriptor is closed by now and nobody
	 * can mess with the pv state.
	 */
	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
	/*
	 * Remove the mmu notifier only when the whole KVM VM is torn down,
	 * and only if one was registered to begin with. If the VM is
	 * currently not protected, but was previously protected, then it's
	 * possible that the notifier is still registered.
	 */
	if (kvm->arch.pv.mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);

	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		phys_addr_t sca_phys = virt_to_phys(sca);

		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		phys_addr_t sca_phys = virt_to_phys(sca);

		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned long vcpu_idx;
	u32 scaol, scaoh;
	phys_addr_t new_sca_phys;

	if (kvm->arch.use_esca)
		return 0;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	new_sca_phys = virt_to_phys(new_sca);
	scaoh = new_sca_phys >> 32;
	scaol = new_sca_phys & ESCA_SCAOL_MASK;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
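/*
 * Worked example: on a machine with sclp.has_esca, creating vCPU id 64 on
 * a VM that still uses the basic SCA (ids 0 to KVM_S390_BSCA_CPU_SLOTS - 1)
 * makes sca_can_add_vcpu() perform the one-way switch to the extended SCA
 * first, after which ids up to KVM_S390_ESCA_CPU_SLOTS - 1 fit.
 */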
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
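/*
 * Illustrative only: userspace can observe this value through the one-reg
 * interface (see kvm_arch_vcpu_ioctl_get_one_reg() below), which calls
 * kvm_s390_get_cpu_timer() and therefore may run on a thread other than
 * the VCPU thread; vcpu_fd is an assumption.
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	int r = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */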
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
{
	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
		return true;
	return false;
}

static bool kvm_has_pckmo_ecc(struct kvm *kvm)
{
	/* At least one ECC subfunction must be present */
	return kvm_has_pckmo_subfunc(kvm, 32) ||
	       kvm_has_pckmo_subfunc(kvm, 33) ||
	       kvm_has_pckmo_subfunc(kvm, 34) ||
	       kvm_has_pckmo_subfunc(kvm, 40) ||
	       kvm_has_pckmo_subfunc(kvm, 41);
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~ECD_ECC;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);

	if (!cbrlo_page)
		return -ENOMEM;

	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
}
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	u16 uvrc, uvrrc;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 11))
		vcpu->arch.sie_block->ecb |= ECB_PTF;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.sie_block->ecb |= ECB_SPECI;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);

	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	kvm_s390_vcpu_pci_setup(vcpu);

	mutex_lock(&vcpu->kvm->lock);
	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
		if (rc)
			kvm_s390_vcpu_unsetup_cmma(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);

	return rc;
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		return -EINVAL;
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct sie_page *sie_page;
	int rc;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sie_page)
		return -ENOMEM;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT |
				    KVM_SYNC_DIAG318;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
	 */
	if (cpu_has_vx())
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = __kvm_ucontrol_vcpu_init(vcpu);
		if (rc)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

	rc = kvm_s390_vcpu_setup(vcpu);
	if (rc)
		goto out_ucontrol_uninit;

	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
	return 0;

out_ucontrol_uninit:
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	__kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	unsigned long i;

	trace_kvm_s390_gmap_notifier(start, end, gmap_is_shadow(gmap));

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
		}
	}
}
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    READ_ONCE(halt_poll_max_steal)) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
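/*
 * Usage sketch for the ONE_REG interface above (hypothetical userspace
 * snippet, not part of this file; "vcpu_fd" is a placeholder for an open
 * vCPU file descriptor):
 *
 *	#include <err.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	uint32_t todpr;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_S390_TODPR,
 *		.addr = (uint64_t)&todpr,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		err(1, "KVM_GET_ONE_REG");
 *
 * KVM_SET_ONE_REG works the same way in the other direction; both are
 * rejected with -EINVAL for protected (PV) vCPUs.
 */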
static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));

	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	/* Initial reset is a superset of the normal reset */
	kvm_arch_vcpu_ioctl_normal_reset(vcpu);

	/*
	 * This equals initial cpu reset in pop, but we don't switch to ESA.
	 * We do not only reset the internal data, but also ...
	 */
	vcpu->arch.sie_block->gpsw.mask = 0;
	vcpu->arch.sie_block->gpsw.addr = 0;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0;
	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;

	/* ... the data in sync regs */
	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
	vcpu->run->psw_addr = 0;
	vcpu->run->psw_mask = 0;
	vcpu->run->s.regs.todpr = 0;
	vcpu->run->s.regs.cputm = 0;
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.pp = 0;
	vcpu->run->s.regs.gbea = 1;
	vcpu->run->s.regs.fpc = 0;
	/*
	 * Do not reset these registers in the protected case, as some of
	 * them are overlaid and they are not accessible in this case
	 * anyway.
	 */
	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->gbea = 1;
		vcpu->arch.sie_block->pp = 0;
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->todpr = 0;
	}
}

static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Clear reset is a superset of the initial reset */
	kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));

	regs->etoken = 0;
	regs->etoken_extension = 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
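/*
 * Usage sketch (hypothetical userspace snippet, not part of this file):
 * the initial PSW can only be set while the vCPU is stopped, which is why
 * the helper above checks is_vcpu_stopped(). "vcpu_fd" and the PSW values
 * are placeholders; mask 0x0000000180000000 sets the EA/BA bits for the
 * 64-bit addressing mode.
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000ULL,
 *		.addr = 0x10000,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */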
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}
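/*
 * Usage sketch (hypothetical userspace snippet, not part of this file):
 * enabling single-stepping. Only the bits in VALID_GUESTDBG_FLAGS are
 * accepted; anything else fails with -EINVAL, as does a machine without
 * the guest-PER facility (sclp.has_gpere).
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */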
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		rc = kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		rc = kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}
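/*
 * Usage sketch (hypothetical userspace snippet, not part of this file):
 * restarting a stopped vCPU. Note that issuing KVM_SET_MP_STATE at all
 * switches the VM to user-controlled CPU state (see
 * kvm_s390_set_user_cpu_state_ctrl() above), after which userspace is
 * responsible for starting vCPUs itself.
 *
 *	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_OPERATING };
 *
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
 */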
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * If the guest prefix changed, re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	union tod_clock clk;
	unsigned long i;

	preempt_disable();

	store_tod_clock_ext(&clk);

	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
}

int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
	if (!mutex_trylock(&kvm->lock))
		return 0;
	__kvm_s390_set_tod_clock(kvm, gtod);
	mutex_unlock(&kvm->lock);
	return 1;
}
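/*
 * Worked example for the epoch computation above (made-up numbers): the
 * guest TOD is host TOD + epoch, evaluated in 128-bit extended-TOD
 * arithmetic split into epdx (epoch index) and epoch. The 64-bit
 * subtraction gtod->tod - clk.tod wraps when the requested guest TOD lies
 * before the host TOD, in which case a borrow must be taken from the
 * epoch index; that is the "kvm->arch.epdx -= 1" above. E.g. with
 * clk.tod = 0x100 and gtod->tod = 0x80:
 *
 *	epoch = 0x80 - 0x100 = 0xffffffffffffff80 > gtod->tod
 *
 * so epdx is decremented by one to carry the borrow into the upper word.
 */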
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	return true;
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc || guestdbg_exit_pending(vcpu))
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		vcpu->stat.pfault_sync++;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	kvm_vcpu_srcu_read_lock(vcpu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc || guestdbg_exit_pending(vcpu))
			break;

		kvm_vcpu_srcu_read_unlock(vcpu);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		if (test_cpu_flag(CIF_FPU))
			load_fpu_regs();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		kvm_vcpu_srcu_read_lock(vcpu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	kvm_vcpu_srcu_read_unlock(vcpu);
	return rc;
}
static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	if (MACHINE_HAS_GS) {
		preempt_disable();
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}
static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;

	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * do only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
}
static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		if (!vcpu->arch.host_gscb)
			local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	int rc;

	/*
	 * Running a VM while dumping always has the potential to
	 * produce inconsistent dump data. But for PV vcpus a SIE
	 * entry while dumping could also lead to a fatal validity
	 * intercept which we absolutely want to avoid.
	 */
	if (vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/*
	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
	 * have been fully processed. This will ensure that the VCPU
	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	kvm_s390_clear_stop_irq(vcpu);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);

		if (!is_vcpu_stopped(tmp)) {
			started_vcpus++;
			started_vcpu = tmp;
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *sida_addr;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;
	if (!kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, sida_addr, mop->size))
			r = -EFAULT;
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user(sida_addr, uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}
static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	enum gacc_mode acc_mode;
	void *tmpbuf = NULL;
	int r;

	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
					KVM_S390_MEMOP_F_CHECK_ONLY |
					KVM_S390_MEMOP_F_SKEY_PROTECTION);
	if (r)
		return r;
	if (mop->ar >= NUM_ACRS)
		return -EINVAL;
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
				    acc_mode, mop->key);
		goto out_inject;
	}
	if (acc_mode == GACC_FETCH) {
		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					mop->size, mop->key);
		if (r)
			goto out_inject;
		if (copy_to_user(uaddr, tmpbuf, mop->size)) {
			r = -EFAULT;
			goto out_free;
		}
	} else {
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			goto out_free;
		}
		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					 mop->size, mop->key);
	}

out_inject:
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

out_free:
	vfree(tmpbuf);
	return r;
}
static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
				     struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_vcpu_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_vcpu_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int rc;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	/*
	 * To simplify single stepping of userspace-emulated instructions,
	 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
	 * should_handle_per_ifetch()). However, if userspace emulation injects
	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
	 * after (and not before) the interrupt delivery.
	 */
	if (!rc)
		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;

	return rc;
}
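/*
 * Usage sketch (hypothetical userspace snippet, not part of this file):
 * injecting an external call from CPU address 0 through the async
 * KVM_S390_IRQ path handled above.
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EXTERNAL_CALL,
 *		.u.extcall.code = 0,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 */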
static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
					struct kvm_pv_cmd *cmd)
{
	struct kvm_s390_pv_dmp dmp;
	void *data;
	int ret;

	/* Dump initialization is a prerequisite */
	if (!vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
		return -EFAULT;

	/* We only handle this subcmd right now */
	if (dmp.subcmd != KVM_PV_DUMP_CPU)
		return -EINVAL;

	/* CPU dump length is the same as create cpu storage donation. */
	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
		return -EINVAL;

	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);

	if (ret)
		ret = -EINVAL;

	/* On success copy over the dump data */
	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
		ret = -EFAULT;

	kvfree(data);
	return ret;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_PV_CPU_COMMAND: {
		struct kvm_pv_cmd cmd;

		r = -EINVAL;
		if (!is_prot_virt_host())
			break;

		r = -EFAULT;
		if (copy_from_user(&cmd, argp, sizeof(cmd)))
			break;

		r = -EINVAL;
		if (cmd.flags)
			break;

		/* We only handle this cmd right now */
		if (cmd.cmd != KVM_PV_DUMP)
			break;

		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);

		/* Always copy over UV rc / rrc data */
		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
			r = -EFAULT;
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return true;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	gpa_t size;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
		/*
		 * A few sanity checks. We can have memory slots which have to be
		 * located/ended at a segment boundary (1MB). The memory in userland is
		 * ok to be fragmented into various different vmas. It is okay to mmap()
		 * and munmap() stuff in this slot after doing this call at any time
		 */

		if (new->userspace_addr & 0xffffful)
			return -EINVAL;

		size = new->npages * PAGE_SIZE;
		if (size & 0xffffful)
			return -EINVAL;

		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
			return -EINVAL;
	}

	if (!kvm->arch.migration_mode)
		return 0;

	/*
	 * Turn off migration mode when:
	 * - userspace creates a new memslot with dirty logging off,
	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
	 *   dirty logging is turned off.
	 * Migration mode expects dirty page logging being enabled to store
	 * its dirty bitmap.
	 */
	if (change != KVM_MR_DELETE &&
	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		WARN(kvm_s390_vm_stop_migration(kvm),
		     "Failed to stop migration mode");

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
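/*
 * Worked example for nonhyp_mask() (illustrative): sclp.hmfai packs
 * sixteen 2-bit indicators, one per 64-bit word of the facility list.
 * "(sclp.hmfai << i * 2) >> 30" isolates indicator i as a value 0..3, and
 * the returned mask then keeps 48, 32, 16 or 0 low-order bits of that
 * facility word. E.g. hmfai = 0x40000000 yields indicator 1 for word 0:
 *
 *	0x0000ffffffffffff >> (1 << 4) = 0x00000000ffffffff
 */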
static int __init kvm_s390_init(void)
{
	int i, r;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	r = __kvm_s390_init();
	if (r)
		return r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r) {
		__kvm_s390_exit();
		return r;
	}
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();

	__kvm_s390_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");