// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "mmu_lock.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
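
/*
 * Illustrative note on how the tunables above interact (a summary of the
 * grow/shrink helpers further down in this file, not a new definition):
 * with halt_poll_ns_grow = 2 and halt_poll_ns_grow_start = 10000, a vCPU
 * whose poll window proves too short typically grows it
 * 0 -> 10000 -> 20000 -> 40000 ns, capped at the VM's max_halt_poll_ns,
 * while halt_poll_ns_shrink = 0 resets the window to 0 instead of dividing
 * it when polling stops paying off.
 */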

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
{
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (!PageTransCompoundMap(page))
		return false;

	return is_transparent_hugepage(compound_head(page));
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	int i, cpu, me;
	struct kvm_vcpu *vcpu;
	bool called;

	me = get_cpu();

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
		    vcpu == except)
			continue;

		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);
	}

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);

	free_cpumask_var(cpus);
	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
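
/*
 * Descriptive note: the helpers above implement KVM's vCPU "request"
 * protocol.  kvm_make_request() sets a bit in vcpu->requests; the vCPU is
 * then either woken from its halt wait or, when it is IN_GUEST_MODE, kicked
 * with an IPI (ack_flush is intentionally empty, the IPI only forces a VM
 * exit so the request is noticed).  kvm_flush_remote_tlbs() below is the
 * canonical user.
 */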

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode.  Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode.  We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	void *obj;

	if (mc->nobjs >= min)
		return 0;
	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif
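
/*
 * Usage sketch for the cache helpers above (a summary, not a new API):
 * arch page-fault code typically calls kvm_mmu_topup_memory_cache() before
 * taking mmu_lock so that kvm_mmu_memory_cache_alloc() can hand out
 * pre-allocated objects without sleeping while the lock is held;
 * kvm_mmu_free_memory_cache() releases whatever is left over, e.g. on vCPU
 * teardown.
 */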

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rcuwait_init(&vcpu->wait);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = 0;
}

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_dirty_ring_free(&vcpu->dirty_ring);
	kvm_arch_vcpu_destroy(vcpu);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start, unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
	srcu_read_unlock(&kvm->srcu, idx);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
			     unsigned long end);

struct kvm_hva_range {
	unsigned long start;
	unsigned long end;
	pte_t pte;
	hva_handler_t handler;
	on_lock_fn_t on_lock;
	bool flush_on_ret;
	bool may_block;
};
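
/*
 * Descriptive note on the fields above: @handler runs for every memslot that
 * overlaps [@start, @end); @on_lock, if provided, runs once right after
 * mmu_lock is taken; @flush_on_ret requests a remote TLB flush when a
 * handler returned true (or tlbs_dirty is set); @may_block mirrors
 * mmu_notifier_range_blockable() for the notifier event that built the
 * range.
 */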

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(slot, slots) {
			unsigned long hva_start, hva_end;

			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm, range->start, range->end);
				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
		kvm_flush_remote_tlbs(kvm);

	if (locked)
		KVM_MMU_UNLOCK(kvm);

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}
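
/*
 * Worked example of the clamping above, with hypothetical numbers: for a
 * memslot with userspace_addr = 0x7f0000000000 and an invalidation of
 * [0x7f0000001000, 0x7f0000003000), hva_start/hva_end stay inside the slot
 * and hva_to_gfn_memslot() yields gfn_range [base_gfn + 1, base_gfn + 3);
 * adding PAGE_SIZE - 1 before the conversion rounds a non-aligned end up so
 * that the last partially covered page is included.
 */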

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						pte_t pte,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= pte,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							  unsigned long start,
							  unsigned long end,
							  hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.pte		= __pte(0),
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_notifier_count is zero, then no in-progress invalidations,
	 * including this one, found a relevant memslot at start(); rechecking
	 * memslots here is unnecessary.  Note, a false positive (count elevated
	 * by a different invalidation) is sub-optimal but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_notifier_count))
		return;

	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	if (likely(kvm->mmu_notifier_count == 1)) {
		kvm->mmu_notifier_range_start = start;
		kvm->mmu_notifier_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns.  Keep things simple and just find the minimal range
		 * which includes the current and new ranges.  As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_notifier_range_start =
			min(kvm->mmu_notifier_range_start, start);
		kvm->mmu_notifier_range_end =
			max(kvm->mmu_notifier_range_end, end);
	}
}
EXPORT_SYMBOL_GPL(kvm_inc_notifier_count);

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_inc_notifier_count,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_notifier_count
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
}
EXPORT_SYMBOL_GPL(kvm_dec_notifier_count);
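
/*
 * Descriptive note: mmu_notifier_seq and mmu_notifier_count implement the
 * usual "sample, fault, recheck" pattern.  A page-fault path samples the
 * sequence before resolving a pfn and then calls mmu_notifier_retry() under
 * mmu_lock; if an invalidation raced with the fault, the bump above forces
 * the fault to be retried instead of installing a stale spte.
 */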

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.pte		= __pte(0),
		.handler	= (void *)kvm_null_fn,
		.on_lock	= kvm_dec_notifier_count,
		.flush_on_ret	= false,
		.may_block	= mmu_notifier_range_blockable(range),
	};
	bool wake;

	__kvm_handle_hva_range(kvm, &hva_range);

	/* Pairs with the increment in range_start(). */
	spin_lock(&kvm->mn_invalidate_lock);
	wake = (--kvm->mn_active_invalidate_count == 0);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * There can only be one waiter, since the wait happens under
	 * slots_lock.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	trace_kvm_age_hva(start, end);

	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	trace_kvm_age_hva(start, end);

	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	trace_kvm_test_age_hva(address);

	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
					     kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{
	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

	return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
	/* Suspend KVM before we suspend ftrace, RCU, etc. */
	kvm->pm_notifier.priority = INT_MAX;
	register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
	unregister_pm_notifier(&kvm->pm_notifier);
}
#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = -1;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	slot->flags = 0;
	slot->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot);

	kvfree(slots);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
	case KVM_STATS_TYPE_INSTANT:
		return 0444;
	case KVM_STATS_TYPE_CUMULATIVE:
	case KVM_STATS_TYPE_PEAK:
	default:
		return 0644;
	}
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	static DEFINE_MUTEX(kvm_debugfs_lock);
	struct dentry *dent;
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	const struct _kvm_stats_desc *pdesc;
	int i, ret;
	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
				      kvm_vcpu_stats_header.num_desc;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	mutex_lock(&kvm_debugfs_lock);
	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
	if (dent) {
		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
		dput(dent);
		mutex_unlock(&kvm_debugfs_lock);
		return 0;
	}
	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
	mutex_unlock(&kvm_debugfs_lock);
	if (IS_ERR(dent))
		return 0;

	kvm->debugfs_dentry = dent;
	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VM;
		kvm->debugfs_stat_data[i] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->desc = pdesc;
		stat_data->kind = KVM_STAT_VCPU;
		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}

	ret = kvm_arch_create_vm_debugfs(kvm);
	if (ret) {
		kvm_destroy_vm_debugfs(kvm);
		return i;
	}

	return 0;
}
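
/*
 * Descriptive note: the per-VM directory created above is named "<pid>-<fd>"
 * (task_pid_nr() of the creating task plus the VM file descriptor) and is
 * populated with one file per entry in kvm_vm_stats_desc[] and
 * kvm_vcpu_stats_desc[], all backed by stat_fops_per_vm.
 */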

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-VM debugfs directory is created.  kvm->debugfs_dentry
 * is already set up at this point, so arch-specific debugfs entries can be
 * created under it.  Cleanup is done automatically and recursively by
 * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
 */
int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	int r = -ENOMEM;
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	KVM_MMU_LOCK_INIT(kvm);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	mutex_init(&kvm->slots_arch_lock);
	spin_lock_init(&kvm->mn_invalidate_lock);
	rcuwait_init(&kvm->mn_memslots_update_rcuwait);

	INIT_LIST_HEAD(&kvm->devices);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();

		if (!slots)
			goto out_err_no_arch_destroy_vm;
		/* Generations must be different for each address space. */
		slots->generation = i;
		rcu_assign_pointer(kvm->memslots[i], slots);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	kvm->max_halt_poll_ns = halt_poll_ns;

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();
	kvm_init_pm_notifier(kvm);

	return kvm;

out_err:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in install_new_memslots as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	kvm->mn_active_invalidate_count = 0;
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the VM is not under destruction; this is a safe version of
 * kvm_get_kvm().  Return true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
	return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}
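
/*
 * Descriptive note: the doubled allocation above gives the dirty-log code a
 * scratch area; when the dirty log is copied and cleared atomically with
 * respect to concurrent writers, the second half of the bitmap serves as the
 * temporary snapshot that is copied out to userspace (see
 * kvm_vm_ioctl_get_dirty_log() and kvm_get_dirty_log_protect()).
 */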

/*
 * Delete a memslot by decrementing the number of used slots and shifting all
 * other entries in the array forward one spot.
 */
static inline void kvm_memslot_delete(struct kvm_memslots *slots,
				      struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
		return;

	slots->used_slots--;

	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
		atomic_set(&slots->last_used_slot, 0);

	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	mslots[i] = *memslot;
	slots->id_to_index[memslot->id] = -1;
}

/*
 * "Insert" a new memslot by incrementing the number of used slots.  Returns
 * the new slot's initial index into the memslots array.
 */
static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
{
	return slots->used_slots++;
}

/*
 * Move a changed memslot backwards in the array by shifting existing slots
 * with a higher GFN toward the front of the array.  Note, the changed memslot
 * itself is not preserved in the array, i.e. not swapped at this time, only
 * its new index into the array is tracked.  Returns the changed memslot's
 * current index into the memslots array.
 */
static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
					    struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
	    WARN_ON_ONCE(!slots->used_slots))
		return -1;

	/*
	 * Move the target memslot backward in the array by shifting existing
	 * memslots with a higher GFN (than the target memslot) towards the
	 * front of the array.
	 */
	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
		if (memslot->base_gfn > mslots[i + 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);

		/* Shift the next memslot forward one and update its index. */
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}

/*
 * Move a changed memslot forwards in the array by shifting existing slots with
 * a lower GFN toward the back of the array.  Note, the changed memslot itself
 * is not preserved in the array, i.e. not swapped at this time, only its new
 * index into the array is tracked.  Returns the changed memslot's final index
 * into the memslots array.
 */
static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
					   struct kvm_memory_slot *memslot,
					   int start)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	for (i = start; i > 0; i--) {
		if (memslot->base_gfn < mslots[i - 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);

		/* Shift the next memslot back one and update its index. */
		mslots[i] = mslots[i - 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}

/*
 * Re-sort memslots based on their GFN to account for an added, deleted, or
 * moved memslot.  Sorting memslots by GFN allows using a binary search during
 * memslot lookup.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!  I.e. the entry
 * at memslots[0] has the highest GFN.
 *
 * The sorting algorithm takes advantage of having initially sorted memslots
 * and knowing the position of the changed memslot.  Sorting is also optimized
 * by not swapping the updated memslot and instead only shifting other memslots
 * and tracking the new index for the updated memslot.  Only once its final
 * index is known is the updated memslot copied into its position in the array.
 *
 *  - When deleting a memslot, the deleted memslot simply needs to be moved to
 *    the end of the array.
 *
 *  - When creating a memslot, the algorithm "inserts" the new memslot at the
 *    end of the array and then moves it forward to its correct location.
 *
 *  - When moving a memslot, the algorithm first moves the updated memslot
 *    backward to handle the scenario where the memslot's GFN was changed to a
 *    lower value.  update_memslots() then falls through and runs the same flow
 *    as creating a memslot to move the memslot forward to handle the scenario
 *    where its GFN was changed to a higher value.
 *
 * Note, slots are sorted from highest->lowest instead of lowest->highest for
 * historical reasons.  Originally, invalid memslots were denoted by having
 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
 * to the end of the array.  The current algorithm uses dedicated logic to
 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
 *
 * The other historical motivation for highest->lowest was to improve the
 * performance of memslot lookup.  KVM originally used a linear search starting
 * at memslots[0].  On x86, the largest memslot usually has one of the highest,
 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
 * single memslot above the 4gb boundary.  As the largest memslot is also the
 * most likely to be referenced, sorting it to the front of the array was
 * advantageous.  The current binary search starts from the middle of the array
 * and uses an LRU pointer to improve performance for all memslots and GFNs.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *memslot,
			    enum kvm_mr_change change)
{
	int i;

	if (change == KVM_MR_DELETE) {
		kvm_memslot_delete(slots, memslot);
	} else {
		if (change == KVM_MR_CREATE)
			i = kvm_memslot_insert_back(slots);
		else
			i = kvm_memslot_move_backward(slots, memslot);
		i = kvm_memslot_move_forward(slots, memslot, i);

		/*
		 * Copy the memslot to its new position in memslots and update
		 * its index accordingly.
		 */
		slots->memslots[i] = *memslot;
		slots->id_to_index[memslot->id] = i;
	}
}
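
/*
 * Worked example for update_memslots() (hypothetical GFNs): with slots sorted
 * as base_gfn = {0x100000, 0x800, 0x0} and the middle slot moved to
 * base_gfn 0x200000, kvm_memslot_move_backward() finds nothing to shift (the
 * next entry's GFN is already lower), and kvm_memslot_move_forward() then
 * hops the slot over the 0x100000 entry, giving {0x200000, 0x100000, 0x0}
 * with id_to_index[] updated for every slot that moved.
 */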

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
	u64 gen = old_memslots->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Do not store the new memslots while there are invalidations in
	 * progress, otherwise the locking in invalidate_range_start and
	 * invalidate_range_end will be unbalanced.
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Acquired in kvm_set_memslot. Must be released before synchronize
	 * SRCU below in order to avoid deadlock with another thread
	 * acquiring the slots_arch_lock in an srcu critical section.
	 */
	mutex_unlock(&kvm->slots_arch_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;

	return old_memslots;
}
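
/*
 * Worked example of the generation arithmetic above (illustrative, assuming
 * KVM_ADDRESS_SPACE_NUM == 2): address space 0 cycles through generations
 * 0, 2, 4, ... and address space 1 through 1, 3, 5, ....  While an update is
 * in flight the KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is ORed into the
 * published generation, so readers that cache a generation (e.g. the MMIO
 * fast path) never mistake an in-flux value for a stable one.
 */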
1482
ddc12f2a
BG
1483static size_t kvm_memslots_size(int slots)
1484{
1485 return sizeof(struct kvm_memslots) +
1486 (sizeof(struct kvm_memory_slot) * slots);
1487}
1488
1489static void kvm_copy_memslots(struct kvm_memslots *to,
1490 struct kvm_memslots *from)
1491{
1492 memcpy(to, from, kvm_memslots_size(from->used_slots));
1493}
1494
36947254
SC
1495/*
1496 * Note, at a minimum, the current number of used slots must be allocated, even
1497 * when deleting a memslot, as we need a complete duplicate of the memslots for
1498 * use when invalidating a memslot prior to deleting/moving the memslot.
1499 */
1500static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
1501 enum kvm_mr_change change)
1502{
1503 struct kvm_memslots *slots;
ddc12f2a 1504 size_t new_size;
36947254
SC
1505
1506 if (change == KVM_MR_CREATE)
ddc12f2a 1507 new_size = kvm_memslots_size(old->used_slots + 1);
36947254 1508 else
ddc12f2a 1509 new_size = kvm_memslots_size(old->used_slots);
36947254
SC
1510
1511 slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
1512 if (likely(slots))
ddc12f2a 1513 kvm_copy_memslots(slots, old);
36947254
SC
1514
1515 return slots;
1516}
1517
cf47f50b
SC
1518static int kvm_set_memslot(struct kvm *kvm,
1519 const struct kvm_userspace_memory_region *mem,
9d4c197c 1520 struct kvm_memory_slot *old,
cf47f50b
SC
1521 struct kvm_memory_slot *new, int as_id,
1522 enum kvm_mr_change change)
1523{
1524 struct kvm_memory_slot *slot;
1525 struct kvm_memslots *slots;
1526 int r;
1527
b10a038e
BG
1528 /*
1529 * Released in install_new_memslots.
1530 *
1531 * Must be held from before the current memslots are copied until
1532 * after the new memslots are installed with rcu_assign_pointer,
1533 * then released before the synchronize srcu in install_new_memslots.
1534 *
1535 * When modifying memslots outside of the slots_lock, must be held
1536 * before reading the pointer to the current memslots until after all
1537 * changes to those memslots are complete.
1538 *
1539 * These rules ensure that installing new memslots does not lose
1540 * changes made to the previous memslots.
1541 */
1542 mutex_lock(&kvm->slots_arch_lock);
1543
36947254 1544 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
b10a038e
BG
1545 if (!slots) {
1546 mutex_unlock(&kvm->slots_arch_lock);
cf47f50b 1547 return -ENOMEM;
b10a038e 1548 }
cf47f50b
SC
1549
1550 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1551 /*
1552 * Note, the INVALID flag needs to be in the appropriate entry
1553 * in the freshly allocated memslots, not in @old or @new.
1554 */
1555 slot = id_to_memslot(slots, old->id);
1556 slot->flags |= KVM_MEMSLOT_INVALID;
1557
1558 /*
b10a038e
BG
1559 * We can re-use the memory from the old memslots.
1560 * It will be overwritten with a copy of the new memslots
1561 * after reacquiring the slots_arch_lock below.
cf47f50b
SC
1562 */
1563 slots = install_new_memslots(kvm, as_id, slots);
1564
1565 /* From this point no new shadow pages pointing to a deleted,
1566 * or moved, memslot will be created.
1567 *
1568 * validation of sp->gfn happens in:
1569 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1570 * - kvm_is_visible_gfn (mmu_check_root)
1571 */
1572 kvm_arch_flush_shadow_memslot(kvm, slot);
b10a038e
BG
1573
1574 /* Released in install_new_memslots. */
1575 mutex_lock(&kvm->slots_arch_lock);
1576
1577 /*
1578 * The arch-specific fields of the memslots could have changed
1579 * between releasing the slots_arch_lock in
1580 * install_new_memslots and here, so get a fresh copy of the
1581 * slots.
1582 */
1583 kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
cf47f50b
SC
1584 }
1585
1586 r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
1587 if (r)
1588 goto out_slots;
1589
1590 update_memslots(slots, new, change);
1591 slots = install_new_memslots(kvm, as_id, slots);
1592
1593 kvm_arch_commit_memory_region(kvm, mem, old, new, change);
1594
1595 kvfree(slots);
1596 return 0;
1597
1598out_slots:
b10a038e
BG
1599 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1600 slot = id_to_memslot(slots, old->id);
1601 slot->flags &= ~KVM_MEMSLOT_INVALID;
cf47f50b 1602 slots = install_new_memslots(kvm, as_id, slots);
b10a038e
BG
1603 } else {
1604 mutex_unlock(&kvm->slots_arch_lock);
1605 }
cf47f50b
SC
1606 kvfree(slots);
1607 return r;
1608}
1609
5c0b4f3d
SC
1610static int kvm_delete_memslot(struct kvm *kvm,
1611 const struct kvm_userspace_memory_region *mem,
1612 struct kvm_memory_slot *old, int as_id)
1613{
1614 struct kvm_memory_slot new;
1615 int r;
1616
1617 if (!old->npages)
1618 return -EINVAL;
1619
1620 memset(&new, 0, sizeof(new));
1621 new.id = old->id;
9e9eb226
PX
1622 /*
1623 * This is only for debugging purpose; it should never be referenced
1624 * for a removed memslot.
1625 */
1626 new.as_id = as_id;
5c0b4f3d
SC
1627
1628 r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
1629 if (r)
1630 return r;
1631
e96c81ee 1632 kvm_free_memslot(kvm, old);
5c0b4f3d
SC
1633 return 0;
1634}
1635
6aa8b732
AK
1636/*
1637 * Allocate some memory and give it an address in the guest physical address
1638 * space.
1639 *
1640 * Discontiguous memory is allowed, mostly for framebuffers.
f78e0e2e 1641 *
02d5d55b 1642 * Must be called holding kvm->slots_lock for write.
6aa8b732 1643 */
f78e0e2e 1644int __kvm_set_memory_region(struct kvm *kvm,
09170a49 1645 const struct kvm_userspace_memory_region *mem)
6aa8b732 1646{
6aa8b732 1647 struct kvm_memory_slot old, new;
163da372 1648 struct kvm_memory_slot *tmp;
f64c0398 1649 enum kvm_mr_change change;
163da372
SC
1650 int as_id, id;
1651 int r;
6aa8b732 1652
a50d64d6
XG
1653 r = check_memory_region_flags(mem);
1654 if (r)
71a4c30b 1655 return r;
a50d64d6 1656
f481b069
PB
1657 as_id = mem->slot >> 16;
1658 id = (u16)mem->slot;
1659
6aa8b732
AK
1660 /* General sanity checks */
1661 if (mem->memory_size & (PAGE_SIZE - 1))
71a4c30b 1662 return -EINVAL;
6aa8b732 1663 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
71a4c30b 1664 return -EINVAL;
fa3d315a 1665 /* We can read the guest memory with __xxx_user() later on. */
09d952c9 1666 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
139bc8a6 1667 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
96d4f267 1668 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
09d952c9 1669 mem->memory_size))
71a4c30b 1670 return -EINVAL;
f481b069 1671 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
71a4c30b 1672 return -EINVAL;
6aa8b732 1673 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
71a4c30b 1674 return -EINVAL;
6aa8b732 1675
5c0b4f3d
SC
1676 /*
 1677	 * Make a full copy of the old memslot; the pointer will become stale
1678 * when the memslots are re-sorted by update_memslots(), and the old
1679 * memslot needs to be referenced after calling update_memslots(), e.g.
0dff0846 1680 * to free its resources and for arch specific behavior.
5c0b4f3d 1681 */
0577d1ab
SC
1682 tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
1683 if (tmp) {
1684 old = *tmp;
1685 tmp = NULL;
1686 } else {
1687 memset(&old, 0, sizeof(old));
1688 old.id = id;
1689 }
163da372 1690
5c0b4f3d
SC
1691 if (!mem->memory_size)
1692 return kvm_delete_memslot(kvm, mem, &old, as_id);
1693
9e9eb226 1694 new.as_id = as_id;
f481b069 1695 new.id = id;
163da372
SC
1696 new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
1697 new.npages = mem->memory_size >> PAGE_SHIFT;
6aa8b732 1698 new.flags = mem->flags;
414de7ab 1699 new.userspace_addr = mem->userspace_addr;
6aa8b732 1700
163da372
SC
1701 if (new.npages > KVM_MEM_MAX_NR_PAGES)
1702 return -EINVAL;
1703
5c0b4f3d
SC
1704 if (!old.npages) {
1705 change = KVM_MR_CREATE;
163da372
SC
1706 new.dirty_bitmap = NULL;
1707 memset(&new.arch, 0, sizeof(new.arch));
5c0b4f3d
SC
1708 } else { /* Modify an existing slot. */
1709 if ((new.userspace_addr != old.userspace_addr) ||
163da372 1710 (new.npages != old.npages) ||
5c0b4f3d 1711 ((new.flags ^ old.flags) & KVM_MEM_READONLY))
71a4c30b 1712 return -EINVAL;
09170a49 1713
163da372 1714 if (new.base_gfn != old.base_gfn)
5c0b4f3d
SC
1715 change = KVM_MR_MOVE;
1716 else if (new.flags != old.flags)
1717 change = KVM_MR_FLAGS_ONLY;
1718 else /* Nothing to change. */
1719 return 0;
163da372
SC
1720
1721 /* Copy dirty_bitmap and arch from the current memslot. */
1722 new.dirty_bitmap = old.dirty_bitmap;
1723 memcpy(&new.arch, &old.arch, sizeof(new.arch));
09170a49 1724 }
6aa8b732 1725
f64c0398 1726 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
0a706bee 1727 /* Check for overlaps */
163da372
SC
1728 kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
1729 if (tmp->id == id)
0a706bee 1730 continue;
163da372
SC
1731 if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
1732 (new.base_gfn >= tmp->base_gfn + tmp->npages)))
71a4c30b 1733 return -EEXIST;
0a706bee 1734 }
6aa8b732 1735 }
6aa8b732 1736
414de7ab
SC
1737 /* Allocate/free page dirty bitmap as needed */
1738 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1739 new.dirty_bitmap = NULL;
044c59c4 1740 else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
3c9bd400 1741 r = kvm_alloc_dirty_bitmap(&new);
71a4c30b
SC
1742 if (r)
1743 return r;
3c9bd400
JZ
1744
1745 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1746 bitmap_set(new.dirty_bitmap, 0, new.npages);
6aa8b732
AK
1747 }
1748
cf47f50b
SC
1749 r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
1750 if (r)
1751 goto out_bitmap;
82ce2c96 1752
5c0b4f3d
SC
1753 if (old.dirty_bitmap && !new.dirty_bitmap)
1754 kvm_destroy_dirty_bitmap(&old);
6aa8b732
AK
1755 return 0;
1756
bd0e96fd
SC
1757out_bitmap:
1758 if (new.dirty_bitmap && !old.dirty_bitmap)
1759 kvm_destroy_dirty_bitmap(&new);
6aa8b732 1760 return r;
210c7c4d 1761}
f78e0e2e
SY
1762EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1763
1764int kvm_set_memory_region(struct kvm *kvm,
09170a49 1765 const struct kvm_userspace_memory_region *mem)
f78e0e2e
SY
1766{
1767 int r;
1768
79fac95e 1769 mutex_lock(&kvm->slots_lock);
47ae31e2 1770 r = __kvm_set_memory_region(kvm, mem);
79fac95e 1771 mutex_unlock(&kvm->slots_lock);
f78e0e2e
SY
1772 return r;
1773}
210c7c4d
IE
1774EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1775
7940876e
SH
1776static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1777 struct kvm_userspace_memory_region *mem)
210c7c4d 1778{
f481b069 1779 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
e0d62c7f 1780 return -EINVAL;
09170a49 1781
47ae31e2 1782 return kvm_set_memory_region(kvm, mem);
6aa8b732
AK
1783}
1784
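/*
 * Illustrative sketch (editor's addition, not part of the upstream source):
 * how userspace typically reaches the path above, via the
 * KVM_SET_USER_MEMORY_REGION ioctl on a VM fd. The address space id is
 * packed into the upper 16 bits of @slot, as decoded in
 * __kvm_set_memory_region(); addresses and sizes must be page aligned, and
 * a memory_size of 0 deletes the slot. Kept under #if 0 because it is
 * userspace-side code shown here for reference only.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_user_memslot(int vm_fd, __u16 as_id, __u16 id,
			    __u64 gpa, __u64 size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = ((__u32)as_id << 16) | id,
		.flags = 0,	/* or KVM_MEM_LOG_DIRTY_PAGES, KVM_MEM_READONLY */
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (__u64)(unsigned long)hva,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
#endif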
0dff0846 1785#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2a49f61d
SC
1786/**
1787 * kvm_get_dirty_log - get a snapshot of dirty pages
1788 * @kvm: pointer to kvm instance
1789 * @log: slot id and address to which we copy the log
1790 * @is_dirty: set to '1' if any dirty pages were found
1791 * @memslot: set to the associated memslot, always valid on success
1792 */
1793int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1794 int *is_dirty, struct kvm_memory_slot **memslot)
6aa8b732 1795{
9f6b8029 1796 struct kvm_memslots *slots;
843574a3 1797 int i, as_id, id;
87bf6e7d 1798 unsigned long n;
6aa8b732
AK
1799 unsigned long any = 0;
1800
b2cc64c4
PX
1801 /* Dirty ring tracking is exclusive to dirty log tracking */
1802 if (kvm->dirty_ring_size)
1803 return -ENXIO;
1804
2a49f61d
SC
1805 *memslot = NULL;
1806 *is_dirty = 0;
1807
f481b069
PB
1808 as_id = log->slot >> 16;
1809 id = (u16)log->slot;
1810 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
843574a3 1811 return -EINVAL;
6aa8b732 1812
f481b069 1813 slots = __kvm_memslots(kvm, as_id);
2a49f61d 1814 *memslot = id_to_memslot(slots, id);
0577d1ab 1815 if (!(*memslot) || !(*memslot)->dirty_bitmap)
843574a3 1816 return -ENOENT;
6aa8b732 1817
2a49f61d
SC
1818 kvm_arch_sync_dirty_log(kvm, *memslot);
1819
1820 n = kvm_dirty_bitmap_bytes(*memslot);
6aa8b732 1821
cd1a4a98 1822 for (i = 0; !any && i < n/sizeof(long); ++i)
2a49f61d 1823 any = (*memslot)->dirty_bitmap[i];
6aa8b732 1824
2a49f61d 1825 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
843574a3 1826 return -EFAULT;
6aa8b732 1827
5bb064dc
ZX
1828 if (any)
1829 *is_dirty = 1;
843574a3 1830 return 0;
6aa8b732 1831}
2ba9f0d8 1832EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
6aa8b732 1833
0dff0846 1834#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
ba0513b5 1835/**
b8b00220 1836 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2a31b9db 1837 * and reenable dirty page tracking for the corresponding pages.
ba0513b5
MS
1838 * @kvm: pointer to kvm instance
1839 * @log: slot id and address to which we copy the log
ba0513b5
MS
1840 *
 1841	 * We need to keep in mind that VCPU threads can write to the bitmap
1842 * concurrently. So, to avoid losing track of dirty pages we keep the
1843 * following order:
1844 *
1845 * 1. Take a snapshot of the bit and clear it if needed.
1846 * 2. Write protect the corresponding page.
1847 * 3. Copy the snapshot to the userspace.
 1848	 * 4. Upon return, the caller flushes TLBs if needed.
1849 *
1850 * Between 2 and 4, the guest may write to the page using the remaining TLB
1851 * entry. This is not a problem because the page is reported dirty using
1852 * the snapshot taken before and step 4 ensures that writes done after
1853 * exiting to userspace will be logged for the next call.
1854 *
1855 */
0dff0846 1856static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
ba0513b5 1857{
9f6b8029 1858 struct kvm_memslots *slots;
ba0513b5 1859 struct kvm_memory_slot *memslot;
58d6db34 1860 int i, as_id, id;
ba0513b5
MS
1861 unsigned long n;
1862 unsigned long *dirty_bitmap;
1863 unsigned long *dirty_bitmap_buffer;
0dff0846 1864 bool flush;
ba0513b5 1865
b2cc64c4
PX
1866 /* Dirty ring tracking is exclusive to dirty log tracking */
1867 if (kvm->dirty_ring_size)
1868 return -ENXIO;
1869
f481b069
PB
1870 as_id = log->slot >> 16;
1871 id = (u16)log->slot;
1872 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
58d6db34 1873 return -EINVAL;
ba0513b5 1874
f481b069
PB
1875 slots = __kvm_memslots(kvm, as_id);
1876 memslot = id_to_memslot(slots, id);
0577d1ab
SC
1877 if (!memslot || !memslot->dirty_bitmap)
1878 return -ENOENT;
ba0513b5
MS
1879
1880 dirty_bitmap = memslot->dirty_bitmap;
ba0513b5 1881
0dff0846
SC
1882 kvm_arch_sync_dirty_log(kvm, memslot);
1883
ba0513b5 1884 n = kvm_dirty_bitmap_bytes(memslot);
0dff0846 1885 flush = false;
2a31b9db
PB
1886 if (kvm->manual_dirty_log_protect) {
1887 /*
1888 * Unlike kvm_get_dirty_log, we always return false in *flush,
1889 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
1890 * is some code duplication between this function and
 1891		 * kvm_get_dirty_log, but hopefully all architectures will
 1892		 * transition to kvm_get_dirty_log_protect so that kvm_get_dirty_log
 1893		 * can be eliminated.
1894 */
1895 dirty_bitmap_buffer = dirty_bitmap;
1896 } else {
1897 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1898 memset(dirty_bitmap_buffer, 0, n);
ba0513b5 1899
531810ca 1900 KVM_MMU_LOCK(kvm);
2a31b9db
PB
1901 for (i = 0; i < n / sizeof(long); i++) {
1902 unsigned long mask;
1903 gfn_t offset;
ba0513b5 1904
2a31b9db
PB
1905 if (!dirty_bitmap[i])
1906 continue;
1907
0dff0846 1908 flush = true;
2a31b9db
PB
1909 mask = xchg(&dirty_bitmap[i], 0);
1910 dirty_bitmap_buffer[i] = mask;
1911
a67794ca
LT
1912 offset = i * BITS_PER_LONG;
1913 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1914 offset, mask);
2a31b9db 1915 }
531810ca 1916 KVM_MMU_UNLOCK(kvm);
2a31b9db
PB
1917 }
1918
0dff0846
SC
1919 if (flush)
1920 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1921
2a31b9db
PB
1922 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
1923 return -EFAULT;
1924 return 0;
1925}
0dff0846
SC
1926
1927
1928/**
1929 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
1930 * @kvm: kvm instance
1931 * @log: slot id and address to which we copy the log
1932 *
 1933 * Steps 1-4 below provide a general overview of dirty page logging. See
 1934 * the kvm_get_dirty_log_protect() function description for additional details.
 1935 *
 1936 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
 1937 * always flush the TLB (step 4) even if a previous step failed and the dirty
 1938 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
 1939 * API does not preclude a subsequent dirty log read by userspace. Flushing the
 1940 * TLB ensures writes will be marked dirty for the next log read.
1941 *
1942 * 1. Take a snapshot of the bit and clear it if needed.
1943 * 2. Write protect the corresponding page.
1944 * 3. Copy the snapshot to the userspace.
1945 * 4. Flush TLB's if needed.
1946 */
1947static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1948 struct kvm_dirty_log *log)
1949{
1950 int r;
1951
1952 mutex_lock(&kvm->slots_lock);
1953
1954 r = kvm_get_dirty_log_protect(kvm, log);
1955
1956 mutex_unlock(&kvm->slots_lock);
1957 return r;
1958}
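/*
 * Illustrative userspace-side sketch (editor's addition, not part of the
 * upstream source) of the KVM_GET_DIRTY_LOG ioctl handled above. The caller
 * supplies a bitmap of one bit per page in the slot, sized
 * ALIGN(npages, 64) / 8 bytes, which KVM fills with the pages dirtied since
 * the last call. Shown under #if 0 for reference only.
 */
#if 0
static int fetch_dirty_log(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,		/* (as_id << 16) | id, as above */
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}
#endif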
2a31b9db
PB
1959
1960/**
1961 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
1962 * and reenable dirty page tracking for the corresponding pages.
1963 * @kvm: pointer to kvm instance
1964 * @log: slot id and address from which to fetch the bitmap of dirty pages
1965 */
0dff0846
SC
1966static int kvm_clear_dirty_log_protect(struct kvm *kvm,
1967 struct kvm_clear_dirty_log *log)
2a31b9db
PB
1968{
1969 struct kvm_memslots *slots;
1970 struct kvm_memory_slot *memslot;
98938aa8 1971 int as_id, id;
2a31b9db 1972 gfn_t offset;
98938aa8 1973 unsigned long i, n;
2a31b9db
PB
1974 unsigned long *dirty_bitmap;
1975 unsigned long *dirty_bitmap_buffer;
0dff0846 1976 bool flush;
2a31b9db 1977
b2cc64c4
PX
1978 /* Dirty ring tracking is exclusive to dirty log tracking */
1979 if (kvm->dirty_ring_size)
1980 return -ENXIO;
1981
2a31b9db
PB
1982 as_id = log->slot >> 16;
1983 id = (u16)log->slot;
1984 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1985 return -EINVAL;
1986
76d58e0f 1987 if (log->first_page & 63)
2a31b9db
PB
1988 return -EINVAL;
1989
1990 slots = __kvm_memslots(kvm, as_id);
1991 memslot = id_to_memslot(slots, id);
0577d1ab
SC
1992 if (!memslot || !memslot->dirty_bitmap)
1993 return -ENOENT;
2a31b9db
PB
1994
1995 dirty_bitmap = memslot->dirty_bitmap;
2a31b9db 1996
4ddc9204 1997 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
98938aa8
TB
1998
1999 if (log->first_page > memslot->npages ||
76d58e0f
PB
2000 log->num_pages > memslot->npages - log->first_page ||
2001 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2002 return -EINVAL;
98938aa8 2003
0dff0846
SC
2004 kvm_arch_sync_dirty_log(kvm, memslot);
2005
2006 flush = false;
2a31b9db
PB
2007 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2008 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2009 return -EFAULT;
ba0513b5 2010
531810ca 2011 KVM_MMU_LOCK(kvm);
53eac7a8
PX
2012 for (offset = log->first_page, i = offset / BITS_PER_LONG,
2013 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2a31b9db
PB
2014 i++, offset += BITS_PER_LONG) {
2015 unsigned long mask = *dirty_bitmap_buffer++;
2016 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2017 if (!mask)
ba0513b5
MS
2018 continue;
2019
2a31b9db 2020 mask &= atomic_long_fetch_andnot(mask, p);
ba0513b5 2021
2a31b9db
PB
2022 /*
2023 * mask contains the bits that really have been cleared. This
2024 * never includes any bits beyond the length of the memslot (if
2025 * the length is not aligned to 64 pages), therefore it is not
2026 * a problem if userspace sets them in log->dirty_bitmap.
2027 */
58d2930f 2028 if (mask) {
0dff0846 2029 flush = true;
58d2930f
TY
2030 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2031 offset, mask);
2032 }
ba0513b5 2033 }
531810ca 2034 KVM_MMU_UNLOCK(kvm);
2a31b9db 2035
0dff0846
SC
2036 if (flush)
2037 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2038
58d6db34 2039 return 0;
ba0513b5 2040}
0dff0846
SC
2041
2042static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2043 struct kvm_clear_dirty_log *log)
2044{
2045 int r;
2046
2047 mutex_lock(&kvm->slots_lock);
2048
2049 r = kvm_clear_dirty_log_protect(kvm, log);
2050
2051 mutex_unlock(&kvm->slots_lock);
2052 return r;
2053}
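/*
 * Illustrative userspace-side sketch (editor's addition, not part of the
 * upstream source) of KVM_CLEAR_DIRTY_LOG, which is only available once
 * manual dirty log protection has been enabled with KVM_ENABLE_CAP on
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. As enforced above, first_page must be
 * a multiple of 64, and num_pages must be a multiple of 64 unless the range
 * ends at the last page of the slot. Shown under #if 0 for reference only.
 */
#if 0
static int clear_dirty_range(int vm_fd, __u32 slot, __u64 first_page,
			     __u32 num_pages, void *bitmap)
{
	struct kvm_clear_dirty_log log = {
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages,
		.dirty_bitmap = bitmap,	/* bits to clear, from KVM_GET_DIRTY_LOG */
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
}
#endif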
2054#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
ba0513b5 2055
49c7754c
GN
2056struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2057{
2058 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2059}
a1f4d395 2060EXPORT_SYMBOL_GPL(gfn_to_memslot);
6aa8b732 2061
8e73485c
PB
2062struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2063{
fe22ed82
DM
2064 struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2065 struct kvm_memory_slot *slot;
2066 int slot_index;
2067
2068 slot = try_get_memslot(slots, vcpu->last_used_slot, gfn);
2069 if (slot)
2070 return slot;
2071
2072 /*
2073 * Fall back to searching all memslots. We purposely use
2074 * search_memslots() instead of __gfn_to_memslot() to avoid
2075 * thrashing the VM-wide last_used_index in kvm_memslots.
2076 */
2077 slot = search_memslots(slots, gfn, &slot_index);
2078 if (slot) {
2079 vcpu->last_used_slot = slot_index;
2080 return slot;
2081 }
2082
2083 return NULL;
8e73485c 2084}
e72436bc 2085EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
8e73485c 2086
33e94154 2087bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
e0d62c7f 2088{
bf3e05bc 2089 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
e0d62c7f 2090
c36b7150 2091 return kvm_is_visible_memslot(memslot);
e0d62c7f
IE
2092}
2093EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2094
995decb6
VK
2095bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2096{
2097 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2098
2099 return kvm_is_visible_memslot(memslot);
2100}
2101EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2102
f9b84e19 2103unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
8f0b1ab6
JR
2104{
2105 struct vm_area_struct *vma;
2106 unsigned long addr, size;
2107
2108 size = PAGE_SIZE;
2109
42cde48b 2110 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
8f0b1ab6
JR
2111 if (kvm_is_error_hva(addr))
2112 return PAGE_SIZE;
2113
d8ed45c5 2114 mmap_read_lock(current->mm);
8f0b1ab6
JR
2115 vma = find_vma(current->mm, addr);
2116 if (!vma)
2117 goto out;
2118
2119 size = vma_kernel_pagesize(vma);
2120
2121out:
d8ed45c5 2122 mmap_read_unlock(current->mm);
8f0b1ab6
JR
2123
2124 return size;
2125}
2126
4d8b81ab
XG
2127static bool memslot_is_readonly(struct kvm_memory_slot *slot)
2128{
2129 return slot->flags & KVM_MEM_READONLY;
2130}
2131
4d8b81ab
XG
2132static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2133 gfn_t *nr_pages, bool write)
539cb660 2134{
bc6678a3 2135 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
ca3a490c 2136 return KVM_HVA_ERR_BAD;
48987781 2137
4d8b81ab
XG
2138 if (memslot_is_readonly(slot) && write)
2139 return KVM_HVA_ERR_RO_BAD;
48987781
XG
2140
2141 if (nr_pages)
2142 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2143
4d8b81ab 2144 return __gfn_to_hva_memslot(slot, gfn);
539cb660 2145}
48987781 2146
4d8b81ab
XG
2147static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2148 gfn_t *nr_pages)
2149{
2150 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
539cb660 2151}
48987781 2152
4d8b81ab 2153unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
7940876e 2154 gfn_t gfn)
4d8b81ab
XG
2155{
2156 return gfn_to_hva_many(slot, gfn, NULL);
2157}
2158EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2159
48987781
XG
2160unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2161{
49c7754c 2162 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
48987781 2163}
0d150298 2164EXPORT_SYMBOL_GPL(gfn_to_hva);
539cb660 2165
8e73485c
PB
2166unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2167{
2168 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2169}
2170EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
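/*
 * Illustrative sketch (editor's addition, not part of the upstream source):
 * the usual pattern for consuming the gfn_to_hva() helpers above. The hva is
 * a userspace address of the VMM process, so it must be accessed with the
 * user-copy primitives, and it is only stable while the memslots are
 * protected (kvm->srcu held, as in the guest read/write helpers below).
 */
static inline int peek_guest_byte_example(struct kvm *kvm, gfn_t gfn, u8 *out)
{
	unsigned long addr = gfn_to_hva(kvm, gfn);

	if (kvm_is_error_hva(addr))
		return -EFAULT;

	if (__copy_from_user(out, (void __user *)addr, 1))
		return -EFAULT;
	return 0;
}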
2171
86ab8cff 2172/*
970c0d4b
WY
2173 * Return the hva of a @gfn and the R/W attribute if possible.
2174 *
2175 * @slot: the kvm_memory_slot which contains @gfn
2176 * @gfn: the gfn to be translated
2177 * @writable: used to return the read/write attribute of the @slot if the hva
2178 * is valid and @writable is not NULL
86ab8cff 2179 */
64d83126
CD
2180unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2181 gfn_t gfn, bool *writable)
86ab8cff 2182{
a2ac07fe
GN
2183 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2184
2185 if (!kvm_is_error_hva(hva) && writable)
ba6a3541
PB
2186 *writable = !memslot_is_readonly(slot);
2187
a2ac07fe 2188 return hva;
86ab8cff
XG
2189}
2190
64d83126
CD
2191unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2192{
2193 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2194
2195 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2196}
2197
8e73485c
PB
2198unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2199{
2200 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2201
2202 return gfn_to_hva_memslot_prot(slot, gfn, writable);
2203}
2204
fafc3dba
HY
2205static inline int check_user_page_hwpoison(unsigned long addr)
2206{
0d731759 2207 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
fafc3dba 2208
0d731759 2209 rc = get_user_pages(addr, 1, flags, NULL, NULL);
fafc3dba
HY
2210 return rc == -EHWPOISON;
2211}
2212
2fc84311 2213/*
b9b33da2
PB
 2214 * The fast path to get the writable pfn which will be stored in @pfn;
 2215 * true indicates success, otherwise false is returned. It's also the
 311497e0 2216 * only part that can run in atomic context.
2fc84311 2217 */
b9b33da2
PB
2218static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2219 bool *writable, kvm_pfn_t *pfn)
954bbbc2 2220{
8d4e1288 2221 struct page *page[1];
954bbbc2 2222
12ce13fe
XG
2223 /*
2224 * Fast pin a writable pfn only if it is a write fault request
 2225	 * or the caller allows mapping a writable pfn for a read fault
2226 * request.
2227 */
2228 if (!(write_fault || writable))
2229 return false;
612819c3 2230
dadbb612 2231 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2fc84311 2232 *pfn = page_to_pfn(page[0]);
612819c3 2233
2fc84311
XG
2234 if (writable)
2235 *writable = true;
2236 return true;
2237 }
af585b92 2238
2fc84311
XG
2239 return false;
2240}
612819c3 2241
2fc84311
XG
2242/*
2243 * The slow path to get the pfn of the specified host virtual address,
2244 * 1 indicates success, -errno is returned if error is detected.
2245 */
2246static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
ba049e93 2247 bool *writable, kvm_pfn_t *pfn)
2fc84311 2248{
ce53053c
AV
2249 unsigned int flags = FOLL_HWPOISON;
2250 struct page *page;
2fc84311 2251 int npages = 0;
612819c3 2252
2fc84311
XG
2253 might_sleep();
2254
2255 if (writable)
2256 *writable = write_fault;
2257
ce53053c
AV
2258 if (write_fault)
2259 flags |= FOLL_WRITE;
2260 if (async)
2261 flags |= FOLL_NOWAIT;
d4944b0e 2262
ce53053c 2263 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2fc84311
XG
2264 if (npages != 1)
2265 return npages;
2266
2267 /* map read fault as writable if possible */
12ce13fe 2268 if (unlikely(!write_fault) && writable) {
ce53053c 2269 struct page *wpage;
2fc84311 2270
dadbb612 2271 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2fc84311 2272 *writable = true;
ce53053c
AV
2273 put_page(page);
2274 page = wpage;
612819c3 2275 }
887c08ac 2276 }
ce53053c 2277 *pfn = page_to_pfn(page);
2fc84311
XG
2278 return npages;
2279}
539cb660 2280
4d8b81ab
XG
2281static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2282{
2283 if (unlikely(!(vma->vm_flags & VM_READ)))
2284 return false;
2e2e3738 2285
4d8b81ab
XG
2286 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2287 return false;
887c08ac 2288
4d8b81ab
XG
2289 return true;
2290}
bf998156 2291
f8be156b
NP
2292static int kvm_try_get_pfn(kvm_pfn_t pfn)
2293{
2294 if (kvm_is_reserved_pfn(pfn))
2295 return 1;
2296 return get_page_unless_zero(pfn_to_page(pfn));
2297}
2298
92176a8e
PB
2299static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2300 unsigned long addr, bool *async,
a340b3e2
KA
2301 bool write_fault, bool *writable,
2302 kvm_pfn_t *p_pfn)
92176a8e 2303{
a9545779 2304 kvm_pfn_t pfn;
bd2fae8d
PB
2305 pte_t *ptep;
2306 spinlock_t *ptl;
add6a0cd
PB
2307 int r;
2308
9fd6dad1 2309 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
add6a0cd
PB
2310 if (r) {
2311 /*
2312 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2313 * not call the fault handler, so do it here.
2314 */
2315 bool unlocked = false;
64019a2e 2316 r = fixup_user_fault(current->mm, addr,
add6a0cd
PB
2317 (write_fault ? FAULT_FLAG_WRITE : 0),
2318 &unlocked);
a8387d0b
PB
2319 if (unlocked)
2320 return -EAGAIN;
add6a0cd
PB
2321 if (r)
2322 return r;
2323
9fd6dad1 2324 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
add6a0cd
PB
2325 if (r)
2326 return r;
bd2fae8d 2327 }
add6a0cd 2328
bd2fae8d
PB
2329 if (write_fault && !pte_write(*ptep)) {
2330 pfn = KVM_PFN_ERR_RO_FAULT;
2331 goto out;
add6a0cd
PB
2332 }
2333
a340b3e2 2334 if (writable)
bd2fae8d
PB
2335 *writable = pte_write(*ptep);
2336 pfn = pte_pfn(*ptep);
add6a0cd
PB
2337
2338 /*
2339 * Get a reference here because callers of *hva_to_pfn* and
2340 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2341 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
2342 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
2343 * simply do nothing for reserved pfns.
2344 *
2345 * Whoever called remap_pfn_range is also going to call e.g.
2346 * unmap_mapping_range before the underlying pages are freed,
2347 * causing a call to our MMU notifier.
f8be156b
NP
2348 *
2349 * Certain IO or PFNMAP mappings can be backed with valid
 2350	 * struct pages, but be allocated without refcounting, e.g. the
2351 * tail pages of non-compound higher order allocations, which
2352 * would then underflow the refcount when the caller does the
2353 * required put_page. Don't allow those pages here.
add6a0cd 2354 */
f8be156b
NP
2355 if (!kvm_try_get_pfn(pfn))
2356 r = -EFAULT;
add6a0cd 2357
bd2fae8d
PB
2358out:
2359 pte_unmap_unlock(ptep, ptl);
add6a0cd 2360 *p_pfn = pfn;
f8be156b
NP
2361
2362 return r;
92176a8e
PB
2363}
2364
12ce13fe
XG
2365/*
2366 * Pin guest page in memory and return its pfn.
2367 * @addr: host virtual address which maps memory to the guest
 2368 * @atomic: true if called from atomic context, in which case this function
 2369 * must not sleep
 2370 * @async: whether this function needs to wait for IO to complete if the
 2371 * host page is not in memory
 2372 * @write_fault: whether we should get a writable host page
 2373 * @writable: whether it is allowed to map a writable host page for !@write_fault
2373 *
2374 * The function will map a writable host page for these two cases:
2375 * 1): @write_fault = true
2376 * 2): @write_fault = false && @writable, @writable will tell the caller
2377 * whether the mapping is writable.
2378 */
ba049e93 2379static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2fc84311
XG
2380 bool write_fault, bool *writable)
2381{
2382 struct vm_area_struct *vma;
ba049e93 2383 kvm_pfn_t pfn = 0;
92176a8e 2384 int npages, r;
2e2e3738 2385
2fc84311
XG
2386 /* we can do it either atomically or asynchronously, not both */
2387 BUG_ON(atomic && async);
8d4e1288 2388
b9b33da2 2389 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2fc84311
XG
2390 return pfn;
2391
2392 if (atomic)
2393 return KVM_PFN_ERR_FAULT;
2394
2395 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2396 if (npages == 1)
2397 return pfn;
8d4e1288 2398
d8ed45c5 2399 mmap_read_lock(current->mm);
2fc84311
XG
2400 if (npages == -EHWPOISON ||
2401 (!async && check_user_page_hwpoison(addr))) {
2402 pfn = KVM_PFN_ERR_HWPOISON;
2403 goto exit;
2404 }
2405
a8387d0b 2406retry:
fc98c03b 2407 vma = vma_lookup(current->mm, addr);
2fc84311
XG
2408
2409 if (vma == NULL)
2410 pfn = KVM_PFN_ERR_FAULT;
92176a8e 2411 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
a340b3e2 2412 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
a8387d0b
PB
2413 if (r == -EAGAIN)
2414 goto retry;
92176a8e
PB
2415 if (r < 0)
2416 pfn = KVM_PFN_ERR_FAULT;
2fc84311 2417 } else {
4d8b81ab 2418 if (async && vma_is_valid(vma, write_fault))
2fc84311
XG
2419 *async = true;
2420 pfn = KVM_PFN_ERR_FAULT;
2421 }
2422exit:
d8ed45c5 2423 mmap_read_unlock(current->mm);
2e2e3738 2424 return pfn;
35149e21
AL
2425}
2426
ba049e93
DW
2427kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
2428 bool atomic, bool *async, bool write_fault,
4a42d848 2429 bool *writable, hva_t *hva)
887c08ac 2430{
4d8b81ab
XG
2431 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2432
4a42d848
DS
2433 if (hva)
2434 *hva = addr;
2435
b2740d35
PB
2436 if (addr == KVM_HVA_ERR_RO_BAD) {
2437 if (writable)
2438 *writable = false;
4d8b81ab 2439 return KVM_PFN_ERR_RO_FAULT;
b2740d35 2440 }
4d8b81ab 2441
b2740d35
PB
2442 if (kvm_is_error_hva(addr)) {
2443 if (writable)
2444 *writable = false;
81c52c56 2445 return KVM_PFN_NOSLOT;
b2740d35 2446 }
4d8b81ab
XG
2447
2448 /* Do not map writable pfn in the readonly memslot. */
2449 if (writable && memslot_is_readonly(slot)) {
2450 *writable = false;
2451 writable = NULL;
2452 }
2453
2454 return hva_to_pfn(addr, atomic, async, write_fault,
2455 writable);
887c08ac 2456}
3520469d 2457EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
887c08ac 2458
ba049e93 2459kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
612819c3
MT
2460 bool *writable)
2461{
e37afc6e 2462 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
4a42d848 2463 write_fault, writable, NULL);
612819c3
MT
2464}
2465EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2466
ba049e93 2467kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
506f0d6f 2468{
4a42d848 2469 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
506f0d6f 2470}
e37afc6e 2471EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
506f0d6f 2472
ba049e93 2473kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
506f0d6f 2474{
4a42d848 2475 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
506f0d6f 2476}
037d92dc 2477EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
506f0d6f 2478
ba049e93 2479kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
8e73485c
PB
2480{
2481 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2482}
2483EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2484
ba049e93 2485kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
e37afc6e
PB
2486{
2487 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2488}
2489EXPORT_SYMBOL_GPL(gfn_to_pfn);
2490
ba049e93 2491kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8e73485c
PB
2492{
2493 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2494}
2495EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
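/*
 * Illustrative sketch (editor's addition, not part of the upstream source):
 * callers of the gfn_to_pfn() family above own a reference on the returned
 * pfn and must drop it with kvm_release_pfn_clean()/_dirty() (defined later
 * in this file), as noted in hva_to_pfn_remapped(). The access itself is
 * elided here.
 */
static inline int with_pinned_guest_page_example(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	kvm_pfn_t pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);

	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/* ... read or modify the page here ... */

	kvm_release_pfn_dirty(pfn);	/* use _clean() if nothing was written */
	return 0;
}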
2496
d9ef13c2
PB
2497int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2498 struct page **pages, int nr_pages)
48987781
XG
2499{
2500 unsigned long addr;
076b925d 2501 gfn_t entry = 0;
48987781 2502
d9ef13c2 2503 addr = gfn_to_hva_many(slot, gfn, &entry);
48987781
XG
2504 if (kvm_is_error_hva(addr))
2505 return -1;
2506
2507 if (entry < nr_pages)
2508 return 0;
2509
dadbb612 2510 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
48987781
XG
2511}
2512EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2513
ba049e93 2514static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
a2766325 2515{
81c52c56 2516 if (is_error_noslot_pfn(pfn))
cb9aaa30 2517 return KVM_ERR_PTR_BAD_PAGE;
a2766325 2518
bf4bea8e 2519 if (kvm_is_reserved_pfn(pfn)) {
cb9aaa30 2520 WARN_ON(1);
6cede2e6 2521 return KVM_ERR_PTR_BAD_PAGE;
cb9aaa30 2522 }
a2766325
XG
2523
2524 return pfn_to_page(pfn);
2525}
2526
35149e21
AL
2527struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2528{
ba049e93 2529 kvm_pfn_t pfn;
2e2e3738
AL
2530
2531 pfn = gfn_to_pfn(kvm, gfn);
2e2e3738 2532
a2766325 2533 return kvm_pfn_to_page(pfn);
954bbbc2
AK
2534}
2535EXPORT_SYMBOL_GPL(gfn_to_page);
2536
91724814
BO
2537void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
2538{
2539 if (pfn == 0)
2540 return;
2541
2542 if (cache)
2543 cache->pfn = cache->gfn = 0;
2544
2545 if (dirty)
2546 kvm_release_pfn_dirty(pfn);
2547 else
2548 kvm_release_pfn_clean(pfn);
2549}
2550
2551static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
2552 struct gfn_to_pfn_cache *cache, u64 gen)
2553{
2554 kvm_release_pfn(cache->pfn, cache->dirty, cache);
2555
2556 cache->pfn = gfn_to_pfn_memslot(slot, gfn);
2557 cache->gfn = gfn;
2558 cache->dirty = false;
2559 cache->generation = gen;
2560}
2561
1eff70a9 2562static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
91724814
BO
2563 struct kvm_host_map *map,
2564 struct gfn_to_pfn_cache *cache,
2565 bool atomic)
e45adf66
KA
2566{
2567 kvm_pfn_t pfn;
2568 void *hva = NULL;
2569 struct page *page = KVM_UNMAPPED_PAGE;
1eff70a9 2570 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
91724814 2571 u64 gen = slots->generation;
e45adf66
KA
2572
2573 if (!map)
2574 return -EINVAL;
2575
91724814
BO
2576 if (cache) {
2577 if (!cache->pfn || cache->gfn != gfn ||
2578 cache->generation != gen) {
2579 if (atomic)
2580 return -EAGAIN;
2581 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
2582 }
2583 pfn = cache->pfn;
2584 } else {
2585 if (atomic)
2586 return -EAGAIN;
2587 pfn = gfn_to_pfn_memslot(slot, gfn);
2588 }
e45adf66
KA
2589 if (is_error_noslot_pfn(pfn))
2590 return -EINVAL;
2591
2592 if (pfn_valid(pfn)) {
2593 page = pfn_to_page(pfn);
91724814
BO
2594 if (atomic)
2595 hva = kmap_atomic(page);
2596 else
2597 hva = kmap(page);
d30b214d 2598#ifdef CONFIG_HAS_IOMEM
91724814 2599 } else if (!atomic) {
e45adf66 2600 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
91724814
BO
2601 } else {
2602 return -EINVAL;
d30b214d 2603#endif
e45adf66
KA
2604 }
2605
2606 if (!hva)
2607 return -EFAULT;
2608
2609 map->page = page;
2610 map->hva = hva;
2611 map->pfn = pfn;
2612 map->gfn = gfn;
2613
2614 return 0;
2615}
2616
91724814
BO
2617int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
2618 struct gfn_to_pfn_cache *cache, bool atomic)
1eff70a9 2619{
91724814
BO
2620 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
2621 cache, atomic);
1eff70a9
BO
2622}
2623EXPORT_SYMBOL_GPL(kvm_map_gfn);
2624
e45adf66
KA
2625int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2626{
91724814
BO
2627 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
2628 NULL, false);
e45adf66
KA
2629}
2630EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2631
28bd726a
PX
2632static void __kvm_unmap_gfn(struct kvm *kvm,
2633 struct kvm_memory_slot *memslot,
91724814
BO
2634 struct kvm_host_map *map,
2635 struct gfn_to_pfn_cache *cache,
2636 bool dirty, bool atomic)
e45adf66
KA
2637{
2638 if (!map)
2639 return;
2640
2641 if (!map->hva)
2642 return;
2643
91724814
BO
2644 if (map->page != KVM_UNMAPPED_PAGE) {
2645 if (atomic)
2646 kunmap_atomic(map->hva);
2647 else
2648 kunmap(map->page);
2649 }
eb1f2f38 2650#ifdef CONFIG_HAS_IOMEM
91724814 2651 else if (!atomic)
e45adf66 2652 memunmap(map->hva);
91724814
BO
2653 else
2654 WARN_ONCE(1, "Unexpected unmapping in atomic context");
eb1f2f38 2655#endif
e45adf66 2656
91724814 2657 if (dirty)
28bd726a 2658 mark_page_dirty_in_slot(kvm, memslot, map->gfn);
91724814
BO
2659
2660 if (cache)
2661 cache->dirty |= dirty;
2662 else
2663 kvm_release_pfn(map->pfn, dirty, NULL);
e45adf66
KA
2664
2665 map->hva = NULL;
2666 map->page = NULL;
2667}
1eff70a9 2668
91724814
BO
2669int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
2670 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
1eff70a9 2671{
28bd726a 2672 __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
91724814 2673 cache, dirty, atomic);
1eff70a9
BO
2674 return 0;
2675}
2676EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
2677
2678void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2679{
28bd726a
PX
2680 __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
2681 map, NULL, dirty, false);
1eff70a9 2682}
e45adf66
KA
2683EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
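/*
 * Illustrative sketch (editor's addition, not part of the upstream source):
 * kvm_vcpu_map()/kvm_vcpu_unmap() wrap the pfn lookup and the kmap/memremap
 * handling above, so callers only deal with a kernel virtual address. The
 * offset is assumed to stay within the page, and passing dirty=true on unmap
 * marks the page dirty for dirty logging.
 */
static inline int poke_guest_u32_example(struct kvm_vcpu *vcpu, gfn_t gfn,
					 unsigned int offset, u32 val)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gfn, &map))
		return -EFAULT;

	*(u32 *)((u8 *)map.hva + offset) = val;

	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}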
2684
8e73485c
PB
2685struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2686{
ba049e93 2687 kvm_pfn_t pfn;
8e73485c
PB
2688
2689 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
2690
2691 return kvm_pfn_to_page(pfn);
2692}
2693EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
2694
b4231d61
IE
2695void kvm_release_page_clean(struct page *page)
2696{
32cad84f
XG
2697 WARN_ON(is_error_page(page));
2698
35149e21 2699 kvm_release_pfn_clean(page_to_pfn(page));
b4231d61
IE
2700}
2701EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2702
ba049e93 2703void kvm_release_pfn_clean(kvm_pfn_t pfn)
35149e21 2704{
bf4bea8e 2705 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
2e2e3738 2706 put_page(pfn_to_page(pfn));
35149e21
AL
2707}
2708EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2709
b4231d61 2710void kvm_release_page_dirty(struct page *page)
8a7ae055 2711{
a2766325
XG
2712 WARN_ON(is_error_page(page));
2713
35149e21
AL
2714 kvm_release_pfn_dirty(page_to_pfn(page));
2715}
2716EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2717
f7a6509f 2718void kvm_release_pfn_dirty(kvm_pfn_t pfn)
35149e21
AL
2719{
2720 kvm_set_pfn_dirty(pfn);
2721 kvm_release_pfn_clean(pfn);
2722}
f7a6509f 2723EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
35149e21 2724
ba049e93 2725void kvm_set_pfn_dirty(kvm_pfn_t pfn)
35149e21 2726{
d29c03a5
ML
2727 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2728 SetPageDirty(pfn_to_page(pfn));
8a7ae055 2729}
35149e21
AL
2730EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2731
ba049e93 2732void kvm_set_pfn_accessed(kvm_pfn_t pfn)
35149e21 2733{
a78986aa 2734 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2e2e3738 2735 mark_page_accessed(pfn_to_page(pfn));
35149e21
AL
2736}
2737EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2738
ba049e93 2739void kvm_get_pfn(kvm_pfn_t pfn)
35149e21 2740{
bf4bea8e 2741 if (!kvm_is_reserved_pfn(pfn))
2e2e3738 2742 get_page(pfn_to_page(pfn));
35149e21
AL
2743}
2744EXPORT_SYMBOL_GPL(kvm_get_pfn);
8a7ae055 2745
195aefde
IE
2746static int next_segment(unsigned long len, int offset)
2747{
2748 if (len > PAGE_SIZE - offset)
2749 return PAGE_SIZE - offset;
2750 else
2751 return len;
2752}
2753
8e73485c
PB
2754static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2755 void *data, int offset, int len)
195aefde 2756{
e0506bcb
IE
2757 int r;
2758 unsigned long addr;
195aefde 2759
8e73485c 2760 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
e0506bcb
IE
2761 if (kvm_is_error_hva(addr))
2762 return -EFAULT;
3180a7fc 2763 r = __copy_from_user(data, (void __user *)addr + offset, len);
e0506bcb 2764 if (r)
195aefde 2765 return -EFAULT;
195aefde
IE
2766 return 0;
2767}
8e73485c
PB
2768
2769int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2770 int len)
2771{
2772 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2773
2774 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2775}
195aefde
IE
2776EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2777
8e73485c
PB
2778int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2779 int offset, int len)
2780{
2781 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2782
2783 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2784}
2785EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2786
195aefde
IE
2787int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2788{
2789 gfn_t gfn = gpa >> PAGE_SHIFT;
2790 int seg;
2791 int offset = offset_in_page(gpa);
2792 int ret;
2793
2794 while ((seg = next_segment(len, offset)) != 0) {
2795 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2796 if (ret < 0)
2797 return ret;
2798 offset = 0;
2799 len -= seg;
2800 data += seg;
2801 ++gfn;
2802 }
2803 return 0;
2804}
2805EXPORT_SYMBOL_GPL(kvm_read_guest);
2806
8e73485c 2807int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
7ec54588 2808{
7ec54588 2809 gfn_t gfn = gpa >> PAGE_SHIFT;
8e73485c 2810 int seg;
7ec54588 2811 int offset = offset_in_page(gpa);
8e73485c
PB
2812 int ret;
2813
2814 while ((seg = next_segment(len, offset)) != 0) {
2815 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2816 if (ret < 0)
2817 return ret;
2818 offset = 0;
2819 len -= seg;
2820 data += seg;
2821 ++gfn;
2822 }
2823 return 0;
2824}
2825EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
7ec54588 2826
8e73485c
PB
2827static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2828 void *data, int offset, unsigned long len)
2829{
2830 int r;
2831 unsigned long addr;
2832
2833 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
7ec54588
MT
2834 if (kvm_is_error_hva(addr))
2835 return -EFAULT;
0aac03f0 2836 pagefault_disable();
3180a7fc 2837 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
0aac03f0 2838 pagefault_enable();
7ec54588
MT
2839 if (r)
2840 return -EFAULT;
2841 return 0;
2842}
7ec54588 2843
8e73485c
PB
2844int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
2845 void *data, unsigned long len)
2846{
2847 gfn_t gfn = gpa >> PAGE_SHIFT;
2848 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2849 int offset = offset_in_page(gpa);
2850
2851 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
2852}
2853EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
2854
28bd726a
PX
2855static int __kvm_write_guest_page(struct kvm *kvm,
2856 struct kvm_memory_slot *memslot, gfn_t gfn,
8e73485c 2857 const void *data, int offset, int len)
195aefde 2858{
e0506bcb
IE
2859 int r;
2860 unsigned long addr;
195aefde 2861
251eb841 2862 addr = gfn_to_hva_memslot(memslot, gfn);
e0506bcb
IE
2863 if (kvm_is_error_hva(addr))
2864 return -EFAULT;
8b0cedff 2865 r = __copy_to_user((void __user *)addr + offset, data, len);
e0506bcb 2866 if (r)
195aefde 2867 return -EFAULT;
28bd726a 2868 mark_page_dirty_in_slot(kvm, memslot, gfn);
195aefde
IE
2869 return 0;
2870}
8e73485c
PB
2871
2872int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2873 const void *data, int offset, int len)
2874{
2875 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2876
28bd726a 2877 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
8e73485c 2878}
195aefde
IE
2879EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2880
8e73485c
PB
2881int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2882 const void *data, int offset, int len)
2883{
2884 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2885
28bd726a 2886 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
8e73485c
PB
2887}
2888EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2889
195aefde
IE
2890int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2891 unsigned long len)
2892{
2893 gfn_t gfn = gpa >> PAGE_SHIFT;
2894 int seg;
2895 int offset = offset_in_page(gpa);
2896 int ret;
2897
2898 while ((seg = next_segment(len, offset)) != 0) {
2899 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2900 if (ret < 0)
2901 return ret;
2902 offset = 0;
2903 len -= seg;
2904 data += seg;
2905 ++gfn;
2906 }
2907 return 0;
2908}
ff651cb6 2909EXPORT_SYMBOL_GPL(kvm_write_guest);
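/*
 * Illustrative sketch (editor's addition, not part of the upstream source):
 * kvm_read_guest()/kvm_write_guest() take guest physical addresses and hide
 * the per-page segmentation done by next_segment() above; both return 0 on
 * success and a negative errno on failure, and the write side marks the
 * touched pages dirty. The caller is assumed to hold kvm->srcu so the
 * memslots cannot change underneath it.
 */
static inline int copy_guest_u64_example(struct kvm *kvm, gpa_t src, gpa_t dst)
{
	u64 tmp;
	int ret;

	ret = kvm_read_guest(kvm, src, &tmp, sizeof(tmp));
	if (ret)
		return ret;

	return kvm_write_guest(kvm, dst, &tmp, sizeof(tmp));
}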
195aefde 2910
8e73485c
PB
2911int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2912 unsigned long len)
2913{
2914 gfn_t gfn = gpa >> PAGE_SHIFT;
2915 int seg;
2916 int offset = offset_in_page(gpa);
2917 int ret;
2918
2919 while ((seg = next_segment(len, offset)) != 0) {
2920 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2921 if (ret < 0)
2922 return ret;
2923 offset = 0;
2924 len -= seg;
2925 data += seg;
2926 ++gfn;
2927 }
2928 return 0;
2929}
2930EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
2931
5a2d4365
PB
2932static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
2933 struct gfn_to_hva_cache *ghc,
2934 gpa_t gpa, unsigned long len)
49c7754c 2935{
49c7754c 2936 int offset = offset_in_page(gpa);
8f964525
AH
2937 gfn_t start_gfn = gpa >> PAGE_SHIFT;
2938 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
2939 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
2940 gfn_t nr_pages_avail;
49c7754c 2941
6ad1e29f 2942 /* Update ghc->generation before performing any error checks. */
49c7754c 2943 ghc->generation = slots->generation;
6ad1e29f
SC
2944
2945 if (start_gfn > end_gfn) {
2946 ghc->hva = KVM_HVA_ERR_BAD;
2947 return -EINVAL;
2948 }
f1b9dd5e
JM
2949
2950 /*
2951 * If the requested region crosses two memslots, we still
2952 * verify that the entire region is valid here.
2953 */
6ad1e29f 2954 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
f1b9dd5e
JM
2955 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2956 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2957 &nr_pages_avail);
2958 if (kvm_is_error_hva(ghc->hva))
6ad1e29f 2959 return -EFAULT;
f1b9dd5e
JM
2960 }
2961
2962 /* Use the slow path for cross page reads and writes. */
6ad1e29f 2963 if (nr_pages_needed == 1)
49c7754c 2964 ghc->hva += offset;
f1b9dd5e 2965 else
8f964525 2966 ghc->memslot = NULL;
f1b9dd5e 2967
6ad1e29f
SC
2968 ghc->gpa = gpa;
2969 ghc->len = len;
2970 return 0;
49c7754c 2971}
5a2d4365 2972
4e335d9e 2973int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
5a2d4365
PB
2974 gpa_t gpa, unsigned long len)
2975{
4e335d9e 2976 struct kvm_memslots *slots = kvm_memslots(kvm);
5a2d4365
PB
2977 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2978}
4e335d9e 2979EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
49c7754c 2980
4e335d9e 2981int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
7a86dab8
JM
2982 void *data, unsigned int offset,
2983 unsigned long len)
49c7754c 2984{
4e335d9e 2985 struct kvm_memslots *slots = kvm_memslots(kvm);
49c7754c 2986 int r;
4ec6e863 2987 gpa_t gpa = ghc->gpa + offset;
49c7754c 2988
4ec6e863 2989 BUG_ON(len + offset > ghc->len);
8f964525 2990
dc9ce71e
SC
2991 if (slots->generation != ghc->generation) {
2992 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2993 return -EFAULT;
2994 }
8f964525 2995
49c7754c
GN
2996 if (kvm_is_error_hva(ghc->hva))
2997 return -EFAULT;
2998
fcfbc617
SC
2999 if (unlikely(!ghc->memslot))
3000 return kvm_write_guest(kvm, gpa, data, len);
3001
4ec6e863 3002 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
49c7754c
GN
3003 if (r)
3004 return -EFAULT;
28bd726a 3005 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
49c7754c
GN
3006
3007 return 0;
3008}
4e335d9e 3009EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
4ec6e863 3010
4e335d9e
PB
3011int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3012 void *data, unsigned long len)
4ec6e863 3013{
4e335d9e 3014 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
4ec6e863 3015}
4e335d9e 3016EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
49c7754c 3017
0958f0ce
VK
3018int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3019 void *data, unsigned int offset,
3020 unsigned long len)
e03b644f 3021{
4e335d9e 3022 struct kvm_memslots *slots = kvm_memslots(kvm);
e03b644f 3023 int r;
0958f0ce 3024 gpa_t gpa = ghc->gpa + offset;
e03b644f 3025
0958f0ce 3026 BUG_ON(len + offset > ghc->len);
8f964525 3027
dc9ce71e
SC
3028 if (slots->generation != ghc->generation) {
3029 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3030 return -EFAULT;
3031 }
8f964525 3032
e03b644f
GN
3033 if (kvm_is_error_hva(ghc->hva))
3034 return -EFAULT;
3035
fcfbc617 3036 if (unlikely(!ghc->memslot))
0958f0ce 3037 return kvm_read_guest(kvm, gpa, data, len);
fcfbc617 3038
0958f0ce 3039 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
e03b644f
GN
3040 if (r)
3041 return -EFAULT;
3042
3043 return 0;
3044}
0958f0ce
VK
3045EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3046
3047int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3048 void *data, unsigned long len)
3049{
3050 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3051}
4e335d9e 3052EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
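/*
 * Illustrative sketch (editor's addition, not part of the upstream source):
 * the gfn_to_hva_cache helpers above are meant for guest locations that are
 * accessed repeatedly (e.g. shared info pages). The cache is revalidated
 * automatically when the memslot generation changes, and the cached length
 * must cover every later access.
 */
static inline int publish_guest_u64_example(struct kvm *kvm,
					    struct gfn_to_hva_cache *ghc,
					    gpa_t gpa, u64 val)
{
	int r;

	/* Typically done once at setup time, not on every update. */
	r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(val));
	if (r)
		return r;

	return kvm_write_guest_cached(kvm, ghc, &val, sizeof(val));
}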
e03b644f 3053
195aefde
IE
3054int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3055{
2f541442 3056 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
195aefde
IE
3057 gfn_t gfn = gpa >> PAGE_SHIFT;
3058 int seg;
3059 int offset = offset_in_page(gpa);
3060 int ret;
3061
bfda0e84 3062 while ((seg = next_segment(len, offset)) != 0) {
2f541442 3063		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
195aefde
IE
3064 if (ret < 0)
3065 return ret;
3066 offset = 0;
3067 len -= seg;
3068 ++gfn;
3069 }
3070 return 0;
3071}
3072EXPORT_SYMBOL_GPL(kvm_clear_guest);
3073
28bd726a
PX
3074void mark_page_dirty_in_slot(struct kvm *kvm,
3075 struct kvm_memory_slot *memslot,
3076 gfn_t gfn)
6aa8b732 3077{
044c59c4 3078 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
7e9d619d 3079 unsigned long rel_gfn = gfn - memslot->base_gfn;
fb04a1ed 3080 u32 slot = (memslot->as_id << 16) | memslot->id;
6aa8b732 3081
fb04a1ed
PX
3082 if (kvm->dirty_ring_size)
3083 kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
3084 slot, rel_gfn);
3085 else
3086 set_bit_le(rel_gfn, memslot->dirty_bitmap);
6aa8b732
AK
3087 }
3088}
a6a0b05d 3089EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
6aa8b732 3090
49c7754c
GN
3091void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3092{
3093 struct kvm_memory_slot *memslot;
3094
3095 memslot = gfn_to_memslot(kvm, gfn);
28bd726a 3096 mark_page_dirty_in_slot(kvm, memslot, gfn);
49c7754c 3097}
2ba9f0d8 3098EXPORT_SYMBOL_GPL(mark_page_dirty);
49c7754c 3099
8e73485c
PB
3100void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3101{
3102 struct kvm_memory_slot *memslot;
3103
3104 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
28bd726a 3105 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
8e73485c
PB
3106}
3107EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3108
20b7035c
JS
3109void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3110{
3111 if (!vcpu->sigset_active)
3112 return;
3113
3114 /*
3115 * This does a lockless modification of ->real_blocked, which is fine
 3116	 * because only current can change ->real_blocked and all readers of
 3117	 * ->real_blocked don't care as long as ->real_blocked is always a subset
3118 * of ->blocked.
3119 */
3120 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3121}
3122
3123void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3124{
3125 if (!vcpu->sigset_active)
3126 return;
3127
3128 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3129 sigemptyset(&current->real_blocked);
3130}
3131
aca6ff29
WL
3132static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3133{
dee339b5 3134 unsigned int old, val, grow, grow_start;
aca6ff29 3135
2cbd7824 3136 old = val = vcpu->halt_poll_ns;
dee339b5 3137 grow_start = READ_ONCE(halt_poll_ns_grow_start);
6b6de68c 3138 grow = READ_ONCE(halt_poll_ns_grow);
7fa08e71
NW
3139 if (!grow)
3140 goto out;
3141
dee339b5
NW
3142 val *= grow;
3143 if (val < grow_start)
3144 val = grow_start;
aca6ff29 3145
258785ef
DM
3146 if (val > vcpu->kvm->max_halt_poll_ns)
3147 val = vcpu->kvm->max_halt_poll_ns;
313f636d 3148
aca6ff29 3149 vcpu->halt_poll_ns = val;
7fa08e71 3150out:
2cbd7824 3151 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
aca6ff29
WL
3152}
3153
3154static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3155{
6b6de68c 3156 unsigned int old, val, shrink;
aca6ff29 3157
2cbd7824 3158 old = val = vcpu->halt_poll_ns;
6b6de68c
CB
3159 shrink = READ_ONCE(halt_poll_ns_shrink);
3160 if (shrink == 0)
aca6ff29
WL
3161 val = 0;
3162 else
6b6de68c 3163 val /= shrink;
aca6ff29
WL
3164
3165 vcpu->halt_poll_ns = val;
2cbd7824 3166 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
aca6ff29
WL
3167}
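/*
 * Worked example (editor's addition, not part of the upstream source),
 * assuming the default module parameters halt_poll_ns_grow = 2,
 * halt_poll_ns_grow_start = 10000 and halt_poll_ns_shrink = 0: a vCPU whose
 * halt_poll_ns is 0 and that keeps being woken up within the poll window
 * grows 0 -> 10000 -> 20000 -> 40000 -> ... ns, capped at
 * kvm->max_halt_poll_ns, while a single long block with shrink == 0 resets
 * halt_poll_ns straight back to 0 instead of dividing it.
 */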
3168
f7819512
PB
3169static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3170{
50c28f21
JS
3171 int ret = -EINTR;
3172 int idx = srcu_read_lock(&vcpu->kvm->srcu);
3173
f7819512
PB
3174 if (kvm_arch_vcpu_runnable(vcpu)) {
3175 kvm_make_request(KVM_REQ_UNHALT, vcpu);
50c28f21 3176 goto out;
f7819512
PB
3177 }
3178 if (kvm_cpu_has_pending_timer(vcpu))
50c28f21 3179 goto out;
f7819512 3180 if (signal_pending(current))
50c28f21 3181 goto out;
084071d5
MT
3182 if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3183 goto out;
f7819512 3184
50c28f21
JS
3185 ret = 0;
3186out:
3187 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3188 return ret;
f7819512
PB
3189}
3190
cb953129
DM
3191static inline void
3192update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
3193{
3194 if (waited)
0193cc90 3195 vcpu->stat.generic.halt_poll_fail_ns += poll_ns;
cb953129 3196 else
0193cc90 3197 vcpu->stat.generic.halt_poll_success_ns += poll_ns;
cb953129
DM
3198}
3199
b6958ce4
ED
3200/*
3201 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
3202 */
8776e519 3203void kvm_vcpu_block(struct kvm_vcpu *vcpu)
d3bef15f 3204{
cb953129 3205 ktime_t start, cur, poll_end;
f7819512 3206 bool waited = false;
aca6ff29 3207 u64 block_ns;
f7819512 3208
07ab0f8d
MZ
3209 kvm_arch_vcpu_blocking(vcpu);
3210
cb953129 3211 start = cur = poll_end = ktime_get();
cdd6ad3a 3212 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
19020f8a 3213 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
f95ef0cd 3214
0193cc90 3215 ++vcpu->stat.generic.halt_attempted_poll;
f7819512
PB
3216 do {
3217 /*
3218 * This sets KVM_REQ_UNHALT if an interrupt
3219 * arrives.
3220 */
3221 if (kvm_vcpu_check_block(vcpu) < 0) {
0193cc90 3222 ++vcpu->stat.generic.halt_successful_poll;
3491caf2 3223 if (!vcpu_valid_wakeup(vcpu))
0193cc90 3224 ++vcpu->stat.generic.halt_poll_invalid;
f7819512
PB
3225 goto out;
3226 }
74775654 3227 cpu_relax();
cb953129 3228 poll_end = cur = ktime_get();
6bd5b743 3229 } while (kvm_vcpu_can_poll(cur, stop));
f7819512 3230 }
e5c239cf 3231
da4ad88c 3232 prepare_to_rcuwait(&vcpu->wait);
e5c239cf 3233 for (;;) {
da4ad88c 3234 set_current_state(TASK_INTERRUPTIBLE);
e5c239cf 3235
f7819512 3236 if (kvm_vcpu_check_block(vcpu) < 0)
e5c239cf
MT
3237 break;
3238
f7819512 3239 waited = true;
b6958ce4 3240 schedule();
b6958ce4 3241 }
da4ad88c 3242 finish_rcuwait(&vcpu->wait);
f7819512 3243 cur = ktime_get();
87bcc5fa
JZ
3244 if (waited) {
3245 vcpu->stat.generic.halt_wait_ns +=
3246 ktime_to_ns(cur) - ktime_to_ns(poll_end);
3247 }
f7819512 3248out:
07ab0f8d 3249 kvm_arch_vcpu_unblocking(vcpu);
aca6ff29
WL
3250 block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3251
cb953129
DM
3252 update_halt_poll_stats(
3253 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
3254
44551b2f
WL
3255 if (!kvm_arch_no_poll(vcpu)) {
3256 if (!vcpu_valid_wakeup(vcpu)) {
aca6ff29 3257 shrink_halt_poll_ns(vcpu);
acd05785 3258 } else if (vcpu->kvm->max_halt_poll_ns) {
44551b2f
WL
3259 if (block_ns <= vcpu->halt_poll_ns)
3260 ;
3261 /* we had a long block, shrink polling */
acd05785
DM
3262 else if (vcpu->halt_poll_ns &&
3263 block_ns > vcpu->kvm->max_halt_poll_ns)
44551b2f
WL
3264 shrink_halt_poll_ns(vcpu);
3265 /* we had a short halt and our poll time is too small */
acd05785
DM
3266 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
3267 block_ns < vcpu->kvm->max_halt_poll_ns)
44551b2f
WL
3268 grow_halt_poll_ns(vcpu);
3269 } else {
3270 vcpu->halt_poll_ns = 0;
3271 }
3272 }
aca6ff29 3273
3491caf2
CB
3274 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
3275 kvm_arch_vcpu_block_finish(vcpu);
b6958ce4 3276}
2ba9f0d8 3277EXPORT_SYMBOL_GPL(kvm_vcpu_block);
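/*
 * Illustrative sketch (editor's addition, not part of the upstream source),
 * loosely modelled on how architecture code consumes the result of
 * kvm_vcpu_block(): kvm_vcpu_check_block() raised KVM_REQ_UNHALT if an
 * interrupt or pending timer made the vCPU runnable again, and the arch
 * halt-exit handler picks that request up afterwards.
 */
static inline void handle_halt_exit_example(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_block(vcpu);

	if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
		/* arch-specific: leave the halted state, e.g. update mp_state */
	}
}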
b6958ce4 3278
178f02ff 3279bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
b6d33834 3280{
da4ad88c 3281 struct rcuwait *waitp;
b6d33834 3282
da4ad88c
DB
3283 waitp = kvm_arch_vcpu_get_wait(vcpu);
3284 if (rcuwait_wake_up(waitp)) {
d73eb57b 3285 WRITE_ONCE(vcpu->ready, true);
0193cc90 3286 ++vcpu->stat.generic.halt_wakeup;
178f02ff 3287 return true;
b6d33834
CD
3288 }
3289
178f02ff 3290 return false;
dd1a4cc1
RK
3291}
3292EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3293
0266c894 3294#ifndef CONFIG_S390
dd1a4cc1
RK
3295/*
3296 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3297 */
3298void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3299{
3300 int me;
3301 int cpu = vcpu->cpu;
3302
178f02ff
RK
3303 if (kvm_vcpu_wake_up(vcpu))
3304 return;
3305
b6d33834
CD
3306 me = get_cpu();
3307 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3308 if (kvm_arch_vcpu_should_kick(vcpu))
3309 smp_send_reschedule(cpu);
3310 put_cpu();
3311}
a20ed54d 3312EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
0266c894 3313#endif /* !CONFIG_S390 */
b6d33834 3314
fa93384f 3315int kvm_vcpu_yield_to(struct kvm_vcpu *target)
41628d33
KW
3316{
3317 struct pid *pid;
3318 struct task_struct *task = NULL;
fa93384f 3319 int ret = 0;
41628d33
KW
3320
3321 rcu_read_lock();
3322 pid = rcu_dereference(target->pid);
3323 if (pid)
27fbe64b 3324 task = get_pid_task(pid, PIDTYPE_PID);
41628d33
KW
3325 rcu_read_unlock();
3326 if (!task)
c45c528e 3327 return ret;
c45c528e 3328 ret = yield_to(task, 1);
41628d33 3329 put_task_struct(task);
c45c528e
R
3330
3331 return ret;
41628d33
KW
3332}
3333EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3334
06e48c51
R
3335/*
3336 * Helper that checks whether a VCPU is eligible for directed yield.
 3337 * The most eligible candidate to yield to is decided by the following heuristics:
3338 *
3339 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
3340 * (preempted lock holder), indicated by @in_spin_loop.
656012c7 3341 * Set at the beginning and cleared at the end of interception/PLE handler.
06e48c51
R
3342 *
 3343 * (b) VCPU which has done a pl-exit/cpu relax intercept but did not get a
 3344 * chance last time (it has mostly become eligible now since we have probably
 3345 * yielded to the lock holder in the last iteration. This is done by toggling
 3346 * @dy_eligible each time a VCPU is checked for eligibility.)
3347 *
3348 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
3349 * to preempted lock-holder could result in wrong VCPU selection and CPU
 3350 * burning. Giving priority to a potential lock-holder increases lock
3351 * progress.
3352 *
 3353 * Since the algorithm is based on heuristics, accessing another VCPU's data
 3354 * without locking does no harm. It may result in trying to yield to the same
 3355 * VCPU, failing, and continuing with the next VCPU, and so on.
3356 */
7940876e 3357static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
06e48c51 3358{
4a55dd72 3359#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
06e48c51
R
3360 bool eligible;
3361
3362 eligible = !vcpu->spin_loop.in_spin_loop ||
34656113 3363 vcpu->spin_loop.dy_eligible;
06e48c51
R
3364
3365 if (vcpu->spin_loop.in_spin_loop)
3366 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3367
3368 return eligible;
4a55dd72
SW
3369#else
3370 return true;
06e48c51 3371#endif
4a55dd72 3372}
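/*
 * Illustrative trace (not part of kvm_main.c): how the toggle above plays
 * out for a vCPU that stays PLE/cpu-relax intercepted across successive
 * eligibility checks.  in_spin_loop is true throughout; dy_eligible starts
 * out false.
 *
 *   check #1: eligible = false, dy_eligible flips to true
 *   check #2: eligible = true,  dy_eligible flips to false
 *   check #3: eligible = false, dy_eligible flips to true
 *
 * i.e. a spinning vCPU is skipped on every other directed-yield attempt, so
 * it cannot keep soaking up yields meant for the presumed lock holder.
 */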
c45c528e 3373
17e433b5
WL
3374/*
3375 * Unlike kvm_arch_vcpu_runnable, this function is called outside
3376 * a vcpu_load/vcpu_put pair. However, for most architectures
3377 * kvm_arch_vcpu_runnable does not require vcpu_load.
3378 */
3379bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3380{
3381 return kvm_arch_vcpu_runnable(vcpu);
3382}
3383
3384static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3385{
3386 if (kvm_arch_dy_runnable(vcpu))
3387 return true;
3388
3389#ifdef CONFIG_KVM_ASYNC_PF
3390 if (!list_empty_careful(&vcpu->async_pf.done))
3391 return true;
3392#endif
3393
3394 return false;
3395}
3396
52acd22f
WL
3397bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3398{
3399 return false;
3400}
3401
199b5763 3402void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
d255f4f2 3403{
217ece61
RR
3404 struct kvm *kvm = me->kvm;
3405 struct kvm_vcpu *vcpu;
3406 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3407 int yielded = 0;
c45c528e 3408 int try = 3;
217ece61
RR
3409 int pass;
3410 int i;
d255f4f2 3411
4c088493 3412 kvm_vcpu_set_in_spin_loop(me, true);
217ece61
RR
3413 /*
3414 * We boost the priority of a VCPU that is runnable but not
3415 * currently running, because it got preempted by something
3416 * else and called schedule in __vcpu_run. Hopefully that
3417 * VCPU is holding the lock that we need and will release it.
3418 * We approximate round-robin by starting at the last boosted VCPU.
3419 */
c45c528e 3420 for (pass = 0; pass < 2 && !yielded && try; pass++) {
217ece61 3421 kvm_for_each_vcpu(i, vcpu, kvm) {
5cfc2aab 3422 if (!pass && i <= last_boosted_vcpu) {
217ece61
RR
3423 i = last_boosted_vcpu;
3424 continue;
3425 } else if (pass && i > last_boosted_vcpu)
3426 break;
d73eb57b 3427 if (!READ_ONCE(vcpu->ready))
7bc7ae25 3428 continue;
217ece61
RR
3429 if (vcpu == me)
3430 continue;
da4ad88c
DB
3431 if (rcuwait_active(&vcpu->wait) &&
3432 !vcpu_dy_runnable(vcpu))
217ece61 3433 continue;
046ddeed 3434 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
52acd22f
WL
3435 !kvm_arch_dy_has_pending_interrupt(vcpu) &&
3436 !kvm_arch_vcpu_in_kernel(vcpu))
199b5763 3437 continue;
06e48c51
R
3438 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3439 continue;
c45c528e
R
3440
3441 yielded = kvm_vcpu_yield_to(vcpu);
3442 if (yielded > 0) {
217ece61 3443 kvm->last_boosted_vcpu = i;
217ece61 3444 break;
c45c528e
R
3445 } else if (yielded < 0) {
3446 try--;
3447 if (!try)
3448 break;
217ece61 3449 }
217ece61
RR
3450 }
3451 }
4c088493 3452 kvm_vcpu_set_in_spin_loop(me, false);
06e48c51
R
3453
3454 /* Ensure vcpu is not eligible during next spinloop */
3455 kvm_vcpu_set_dy_eligible(me, false);
d255f4f2
ZE
3456}
3457EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
3458
fb04a1ed
PX
3459static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3460{
3461#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
3462 return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
3463 (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
3464 kvm->dirty_ring_size / PAGE_SIZE);
3465#else
3466 return false;
3467#endif
3468}
3469
1499fa80 3470static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
9a2bb7f4 3471{
11bac800 3472 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
9a2bb7f4
AK
3473 struct page *page;
3474
e4a533a4 3475 if (vmf->pgoff == 0)
039576c0 3476 page = virt_to_page(vcpu->run);
09566765 3477#ifdef CONFIG_X86
e4a533a4 3478 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
ad312c7c 3479 page = virt_to_page(vcpu->arch.pio_data);
5f94c174 3480#endif
4b4357e0 3481#ifdef CONFIG_KVM_MMIO
5f94c174
LV
3482 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3483 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
09566765 3484#endif
fb04a1ed
PX
3485 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
3486 page = kvm_dirty_ring_get_page(
3487 &vcpu->dirty_ring,
3488 vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
039576c0 3489 else
5b1c1493 3490 return kvm_arch_vcpu_fault(vcpu, vmf);
9a2bb7f4 3491 get_page(page);
e4a533a4
NP
3492 vmf->page = page;
3493 return 0;
9a2bb7f4
AK
3494}
3495
f0f37e2f 3496static const struct vm_operations_struct kvm_vcpu_vm_ops = {
e4a533a4 3497 .fault = kvm_vcpu_fault,
9a2bb7f4
AK
3498};
3499
3500static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3501{
fb04a1ed
PX
3502 struct kvm_vcpu *vcpu = file->private_data;
3503 unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3504
3505 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
3506 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
3507 ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
3508 return -EINVAL;
3509
9a2bb7f4
AK
3510 vma->vm_ops = &kvm_vcpu_vm_ops;
3511 return 0;
3512}
3513
bccf2150
AK
3514static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3515{
3516 struct kvm_vcpu *vcpu = filp->private_data;
3517
66c0b394 3518 kvm_put_kvm(vcpu->kvm);
bccf2150
AK
3519 return 0;
3520}
3521
3d3aab1b 3522static struct file_operations kvm_vcpu_fops = {
bccf2150
AK
3523 .release = kvm_vcpu_release,
3524 .unlocked_ioctl = kvm_vcpu_ioctl,
9a2bb7f4 3525 .mmap = kvm_vcpu_mmap,
6038f373 3526 .llseek = noop_llseek,
7ddfd3e0 3527 KVM_COMPAT(kvm_vcpu_compat_ioctl),
bccf2150
AK
3528};
3529
3530/*
3531 * Allocates an inode for the vcpu.
3532 */
3533static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3534{
e46b4692
MY
3535 char name[8 + 1 + ITOA_MAX_LEN + 1];
3536
3537 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3538 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
bccf2150
AK
3539}
3540
3e7093d0 3541static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
45b5939e 3542{
741cbbae 3543#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
d56f5136 3544 struct dentry *debugfs_dentry;
45b5939e 3545 char dir_name[ITOA_MAX_LEN * 2];
45b5939e 3546
45b5939e 3547 if (!debugfs_initialized())
3e7093d0 3548 return;
45b5939e
LC
3549
3550 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
d56f5136
PB
3551 debugfs_dentry = debugfs_create_dir(dir_name,
3552 vcpu->kvm->debugfs_dentry);
45b5939e 3553
d56f5136 3554 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
741cbbae 3555#endif
45b5939e
LC
3556}
3557
c5ea7660
AK
3558/*
3559 * Creates some virtual cpus. Good luck creating more than one.
3560 */
73880c80 3561static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
c5ea7660
AK
3562{
3563 int r;
e09fefde 3564 struct kvm_vcpu *vcpu;
8bd826d6 3565 struct page *page;
c5ea7660 3566
0b1b1dfd 3567 if (id >= KVM_MAX_VCPU_ID)
338c7dba
AH
3568 return -EINVAL;
3569
6c7caebc
PB
3570 mutex_lock(&kvm->lock);
3571 if (kvm->created_vcpus == KVM_MAX_VCPUS) {
3572 mutex_unlock(&kvm->lock);
3573 return -EINVAL;
3574 }
3575
3576 kvm->created_vcpus++;
3577 mutex_unlock(&kvm->lock);
3578
897cc38e
SC
3579 r = kvm_arch_vcpu_precreate(kvm, id);
3580 if (r)
3581 goto vcpu_decrement;
3582
85f47930 3583 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
e529ef66
SC
3584 if (!vcpu) {
3585 r = -ENOMEM;
6c7caebc
PB
3586 goto vcpu_decrement;
3587 }
c5ea7660 3588
fcd97ad5 3589 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
93bb59ca 3590 page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
8bd826d6
SC
3591 if (!page) {
3592 r = -ENOMEM;
e529ef66 3593 goto vcpu_free;
8bd826d6
SC
3594 }
3595 vcpu->run = page_address(page);
3596
3597 kvm_vcpu_init(vcpu, kvm, id);
e529ef66
SC
3598
3599 r = kvm_arch_vcpu_create(vcpu);
3600 if (r)
8bd826d6 3601 goto vcpu_free_run_page;
e529ef66 3602
fb04a1ed
PX
3603 if (kvm->dirty_ring_size) {
3604 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3605 id, kvm->dirty_ring_size);
3606 if (r)
3607 goto arch_vcpu_destroy;
3608 }
3609
11ec2804 3610 mutex_lock(&kvm->lock);
e09fefde
DH
3611 if (kvm_get_vcpu_by_id(kvm, id)) {
3612 r = -EEXIST;
3613 goto unlock_vcpu_destroy;
3614 }
73880c80 3615
8750e72a
RK
3616 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3617 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
c5ea7660 3618
ce55c049
JZ
3619 /* Fill the stats id string for the vcpu */
3620 snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
3621 task_pid_nr(current), id);
3622
fb3f0f51 3623 /* Now it's all set up, let userspace reach it */
66c0b394 3624 kvm_get_kvm(kvm);
bccf2150 3625 r = create_vcpu_fd(vcpu);
73880c80 3626 if (r < 0) {
149487bd 3627 kvm_put_kvm_no_destroy(kvm);
d780592b 3628 goto unlock_vcpu_destroy;
73880c80
GN
3629 }
3630
8750e72a 3631 kvm->vcpus[vcpu->vcpu_idx] = vcpu;
dd489240
PB
3632
3633 /*
 3634 * Pairs with smp_rmb() in kvm_get_vcpu. Write the kvm->vcpus
 3635 * entry before kvm->online_vcpus' incremented value.
3636 */
73880c80
GN
3637 smp_wmb();
3638 atomic_inc(&kvm->online_vcpus);
3639
73880c80 3640 mutex_unlock(&kvm->lock);
42897d86 3641 kvm_arch_vcpu_postcreate(vcpu);
63d04348 3642 kvm_create_vcpu_debugfs(vcpu);
fb3f0f51 3643 return r;
39c3b86e 3644
d780592b 3645unlock_vcpu_destroy:
7d8fece6 3646 mutex_unlock(&kvm->lock);
fb04a1ed
PX
3647 kvm_dirty_ring_free(&vcpu->dirty_ring);
3648arch_vcpu_destroy:
d40ccc62 3649 kvm_arch_vcpu_destroy(vcpu);
8bd826d6
SC
3650vcpu_free_run_page:
3651 free_page((unsigned long)vcpu->run);
e529ef66
SC
3652vcpu_free:
3653 kmem_cache_free(kvm_vcpu_cache, vcpu);
6c7caebc
PB
3654vcpu_decrement:
3655 mutex_lock(&kvm->lock);
3656 kvm->created_vcpus--;
3657 mutex_unlock(&kvm->lock);
c5ea7660
AK
3658 return r;
3659}
3660
1961d276
AK
3661static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3662{
3663 if (sigset) {
3664 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3665 vcpu->sigset_active = 1;
3666 vcpu->sigset = *sigset;
3667 } else
3668 vcpu->sigset_active = 0;
3669 return 0;
3670}
3671
ce55c049
JZ
3672static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
3673 size_t size, loff_t *offset)
3674{
3675 struct kvm_vcpu *vcpu = file->private_data;
3676
3677 return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
3678 &kvm_vcpu_stats_desc[0], &vcpu->stat,
3679 sizeof(vcpu->stat), user_buffer, size, offset);
3680}
3681
3682static const struct file_operations kvm_vcpu_stats_fops = {
3683 .read = kvm_vcpu_stats_read,
3684 .llseek = noop_llseek,
3685};
3686
3687static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
3688{
3689 int fd;
3690 struct file *file;
3691 char name[15 + ITOA_MAX_LEN + 1];
3692
3693 snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
3694
3695 fd = get_unused_fd_flags(O_CLOEXEC);
3696 if (fd < 0)
3697 return fd;
3698
3699 file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
3700 if (IS_ERR(file)) {
3701 put_unused_fd(fd);
3702 return PTR_ERR(file);
3703 }
3704 file->f_mode |= FMODE_PREAD;
3705 fd_install(fd, file);
3706
3707 return fd;
3708}
3709
bccf2150
AK
3710static long kvm_vcpu_ioctl(struct file *filp,
3711 unsigned int ioctl, unsigned long arg)
6aa8b732 3712{
bccf2150 3713 struct kvm_vcpu *vcpu = filp->private_data;
2f366987 3714 void __user *argp = (void __user *)arg;
313a3dc7 3715 int r;
fa3795a7
DH
3716 struct kvm_fpu *fpu = NULL;
3717 struct kvm_sregs *kvm_sregs = NULL;
6aa8b732 3718
0b8f1173 3719 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
6d4e4c4f 3720 return -EIO;
2122ff5e 3721
2ea75be3
DM
3722 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
3723 return -EINVAL;
3724
2122ff5e 3725 /*
5cb0944c
PB
3726 * Some architectures have vcpu ioctls that are asynchronous to vcpu
3727 * execution; mutex_lock() would break them.
2122ff5e 3728 */
5cb0944c
PB
3729 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
3730 if (r != -ENOIOCTLCMD)
9fc77441 3731 return r;
2122ff5e 3732
ec7660cc
CD
3733 if (mutex_lock_killable(&vcpu->mutex))
3734 return -EINTR;
6aa8b732 3735 switch (ioctl) {
0e4524a5
CB
3736 case KVM_RUN: {
3737 struct pid *oldpid;
f0fe5108
AK
3738 r = -EINVAL;
3739 if (arg)
3740 goto out;
0e4524a5 3741 oldpid = rcu_access_pointer(vcpu->pid);
71dbc8a9 3742 if (unlikely(oldpid != task_pid(current))) {
7a72f7a1 3743 /* The thread running this VCPU changed. */
bd2a6394 3744 struct pid *newpid;
f95ef0cd 3745
bd2a6394
CD
3746 r = kvm_arch_vcpu_run_pid_change(vcpu);
3747 if (r)
3748 break;
3749
3750 newpid = get_task_pid(current, PIDTYPE_PID);
7a72f7a1
CB
3751 rcu_assign_pointer(vcpu->pid, newpid);
3752 if (oldpid)
3753 synchronize_rcu();
3754 put_pid(oldpid);
3755 }
1b94f6f8 3756 r = kvm_arch_vcpu_ioctl_run(vcpu);
64be5007 3757 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
6aa8b732 3758 break;
0e4524a5 3759 }
6aa8b732 3760 case KVM_GET_REGS: {
3e4bb3ac 3761 struct kvm_regs *kvm_regs;
6aa8b732 3762
3e4bb3ac 3763 r = -ENOMEM;
b12ce36a 3764 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
3e4bb3ac 3765 if (!kvm_regs)
6aa8b732 3766 goto out;
3e4bb3ac
XZ
3767 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
3768 if (r)
3769 goto out_free1;
6aa8b732 3770 r = -EFAULT;
3e4bb3ac
XZ
3771 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
3772 goto out_free1;
6aa8b732 3773 r = 0;
3e4bb3ac
XZ
3774out_free1:
3775 kfree(kvm_regs);
6aa8b732
AK
3776 break;
3777 }
3778 case KVM_SET_REGS: {
3e4bb3ac 3779 struct kvm_regs *kvm_regs;
6aa8b732 3780
ff5c2c03
SL
3781 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
3782 if (IS_ERR(kvm_regs)) {
3783 r = PTR_ERR(kvm_regs);
6aa8b732 3784 goto out;
ff5c2c03 3785 }
3e4bb3ac 3786 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3e4bb3ac 3787 kfree(kvm_regs);
6aa8b732
AK
3788 break;
3789 }
3790 case KVM_GET_SREGS: {
b12ce36a
BG
3791 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
3792 GFP_KERNEL_ACCOUNT);
fa3795a7
DH
3793 r = -ENOMEM;
3794 if (!kvm_sregs)
3795 goto out;
3796 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
6aa8b732
AK
3797 if (r)
3798 goto out;
3799 r = -EFAULT;
fa3795a7 3800 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
6aa8b732
AK
3801 goto out;
3802 r = 0;
3803 break;
3804 }
3805 case KVM_SET_SREGS: {
ff5c2c03
SL
3806 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
3807 if (IS_ERR(kvm_sregs)) {
3808 r = PTR_ERR(kvm_sregs);
18595411 3809 kvm_sregs = NULL;
6aa8b732 3810 goto out;
ff5c2c03 3811 }
fa3795a7 3812 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
6aa8b732
AK
3813 break;
3814 }
62d9f0db
MT
3815 case KVM_GET_MP_STATE: {
3816 struct kvm_mp_state mp_state;
3817
3818 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
3819 if (r)
3820 goto out;
3821 r = -EFAULT;
893bdbf1 3822 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
62d9f0db
MT
3823 goto out;
3824 r = 0;
3825 break;
3826 }
3827 case KVM_SET_MP_STATE: {
3828 struct kvm_mp_state mp_state;
3829
3830 r = -EFAULT;
893bdbf1 3831 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
62d9f0db
MT
3832 goto out;
3833 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
62d9f0db
MT
3834 break;
3835 }
6aa8b732
AK
3836 case KVM_TRANSLATE: {
3837 struct kvm_translation tr;
3838
3839 r = -EFAULT;
893bdbf1 3840 if (copy_from_user(&tr, argp, sizeof(tr)))
6aa8b732 3841 goto out;
8b006791 3842 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
6aa8b732
AK
3843 if (r)
3844 goto out;
3845 r = -EFAULT;
893bdbf1 3846 if (copy_to_user(argp, &tr, sizeof(tr)))
6aa8b732
AK
3847 goto out;
3848 r = 0;
3849 break;
3850 }
d0bfb940
JK
3851 case KVM_SET_GUEST_DEBUG: {
3852 struct kvm_guest_debug dbg;
6aa8b732
AK
3853
3854 r = -EFAULT;
893bdbf1 3855 if (copy_from_user(&dbg, argp, sizeof(dbg)))
6aa8b732 3856 goto out;
d0bfb940 3857 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
6aa8b732
AK
3858 break;
3859 }
1961d276
AK
3860 case KVM_SET_SIGNAL_MASK: {
3861 struct kvm_signal_mask __user *sigmask_arg = argp;
3862 struct kvm_signal_mask kvm_sigmask;
3863 sigset_t sigset, *p;
3864
3865 p = NULL;
3866 if (argp) {
3867 r = -EFAULT;
3868 if (copy_from_user(&kvm_sigmask, argp,
893bdbf1 3869 sizeof(kvm_sigmask)))
1961d276
AK
3870 goto out;
3871 r = -EINVAL;
893bdbf1 3872 if (kvm_sigmask.len != sizeof(sigset))
1961d276
AK
3873 goto out;
3874 r = -EFAULT;
3875 if (copy_from_user(&sigset, sigmask_arg->sigset,
893bdbf1 3876 sizeof(sigset)))
1961d276
AK
3877 goto out;
3878 p = &sigset;
3879 }
376d41ff 3880 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1961d276
AK
3881 break;
3882 }
b8836737 3883 case KVM_GET_FPU: {
b12ce36a 3884 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
fa3795a7
DH
3885 r = -ENOMEM;
3886 if (!fpu)
3887 goto out;
3888 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
b8836737
AK
3889 if (r)
3890 goto out;
3891 r = -EFAULT;
fa3795a7 3892 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
b8836737
AK
3893 goto out;
3894 r = 0;
3895 break;
3896 }
3897 case KVM_SET_FPU: {
ff5c2c03
SL
3898 fpu = memdup_user(argp, sizeof(*fpu));
3899 if (IS_ERR(fpu)) {
3900 r = PTR_ERR(fpu);
18595411 3901 fpu = NULL;
b8836737 3902 goto out;
ff5c2c03 3903 }
fa3795a7 3904 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
b8836737
AK
3905 break;
3906 }
ce55c049
JZ
3907 case KVM_GET_STATS_FD: {
3908 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
3909 break;
3910 }
bccf2150 3911 default:
313a3dc7 3912 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
bccf2150
AK
3913 }
3914out:
ec7660cc 3915 mutex_unlock(&vcpu->mutex);
fa3795a7
DH
3916 kfree(fpu);
3917 kfree(kvm_sregs);
bccf2150
AK
3918 return r;
3919}
3920
de8e5d74 3921#ifdef CONFIG_KVM_COMPAT
1dda606c
AG
3922static long kvm_vcpu_compat_ioctl(struct file *filp,
3923 unsigned int ioctl, unsigned long arg)
3924{
3925 struct kvm_vcpu *vcpu = filp->private_data;
3926 void __user *argp = compat_ptr(arg);
3927 int r;
3928
0b8f1173 3929 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
1dda606c
AG
3930 return -EIO;
3931
3932 switch (ioctl) {
3933 case KVM_SET_SIGNAL_MASK: {
3934 struct kvm_signal_mask __user *sigmask_arg = argp;
3935 struct kvm_signal_mask kvm_sigmask;
1dda606c
AG
3936 sigset_t sigset;
3937
3938 if (argp) {
3939 r = -EFAULT;
3940 if (copy_from_user(&kvm_sigmask, argp,
893bdbf1 3941 sizeof(kvm_sigmask)))
1dda606c
AG
3942 goto out;
3943 r = -EINVAL;
3968cf62 3944 if (kvm_sigmask.len != sizeof(compat_sigset_t))
1dda606c
AG
3945 goto out;
3946 r = -EFAULT;
1393b4aa
PB
3947 if (get_compat_sigset(&sigset,
3948 (compat_sigset_t __user *)sigmask_arg->sigset))
1dda606c 3949 goto out;
760a9a30
AC
3950 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
3951 } else
3952 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
1dda606c
AG
3953 break;
3954 }
3955 default:
3956 r = kvm_vcpu_ioctl(filp, ioctl, arg);
3957 }
3958
3959out:
3960 return r;
3961}
3962#endif
3963
a1cd3f08
CLG
3964static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
3965{
3966 struct kvm_device *dev = filp->private_data;
3967
3968 if (dev->ops->mmap)
3969 return dev->ops->mmap(dev, vma);
3970
3971 return -ENODEV;
3972}
3973
852b6d57
SW
3974static int kvm_device_ioctl_attr(struct kvm_device *dev,
3975 int (*accessor)(struct kvm_device *dev,
3976 struct kvm_device_attr *attr),
3977 unsigned long arg)
3978{
3979 struct kvm_device_attr attr;
3980
3981 if (!accessor)
3982 return -EPERM;
3983
3984 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3985 return -EFAULT;
3986
3987 return accessor(dev, &attr);
3988}
3989
3990static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
3991 unsigned long arg)
3992{
3993 struct kvm_device *dev = filp->private_data;
3994
0b8f1173 3995 if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged)
ddba9180
SC
3996 return -EIO;
3997
852b6d57
SW
3998 switch (ioctl) {
3999 case KVM_SET_DEVICE_ATTR:
4000 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4001 case KVM_GET_DEVICE_ATTR:
4002 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4003 case KVM_HAS_DEVICE_ATTR:
4004 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4005 default:
4006 if (dev->ops->ioctl)
4007 return dev->ops->ioctl(dev, ioctl, arg);
4008
4009 return -ENOTTY;
4010 }
4011}
4012
852b6d57
SW
4013static int kvm_device_release(struct inode *inode, struct file *filp)
4014{
4015 struct kvm_device *dev = filp->private_data;
4016 struct kvm *kvm = dev->kvm;
4017
2bde9b3e
CLG
4018 if (dev->ops->release) {
4019 mutex_lock(&kvm->lock);
4020 list_del(&dev->vm_node);
4021 dev->ops->release(dev);
4022 mutex_unlock(&kvm->lock);
4023 }
4024
852b6d57
SW
4025 kvm_put_kvm(kvm);
4026 return 0;
4027}
4028
4029static const struct file_operations kvm_device_fops = {
4030 .unlocked_ioctl = kvm_device_ioctl,
4031 .release = kvm_device_release,
7ddfd3e0 4032 KVM_COMPAT(kvm_device_ioctl),
a1cd3f08 4033 .mmap = kvm_device_mmap,
852b6d57
SW
4034};
4035
4036struct kvm_device *kvm_device_from_filp(struct file *filp)
4037{
4038 if (filp->f_op != &kvm_device_fops)
4039 return NULL;
4040
4041 return filp->private_data;
4042}
4043
8538cb22 4044static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
5df554ad 4045#ifdef CONFIG_KVM_MPIC
d60eacb0
WD
4046 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
4047 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
5975a2e0 4048#endif
d60eacb0
WD
4049};
4050
8538cb22 4051int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
d60eacb0
WD
4052{
4053 if (type >= ARRAY_SIZE(kvm_device_ops_table))
4054 return -ENOSPC;
4055
4056 if (kvm_device_ops_table[type] != NULL)
4057 return -EEXIST;
4058
4059 kvm_device_ops_table[type] = ops;
4060 return 0;
4061}
4062
571ee1b6
WL
4063void kvm_unregister_device_ops(u32 type)
4064{
4065 if (kvm_device_ops_table[type] != NULL)
4066 kvm_device_ops_table[type] = NULL;
4067}
4068
852b6d57
SW
4069static int kvm_ioctl_create_device(struct kvm *kvm,
4070 struct kvm_create_device *cd)
4071{
8538cb22 4072 const struct kvm_device_ops *ops = NULL;
852b6d57
SW
4073 struct kvm_device *dev;
4074 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
1d487e9b 4075 int type;
852b6d57
SW
4076 int ret;
4077
d60eacb0
WD
4078 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4079 return -ENODEV;
4080
1d487e9b
PB
4081 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4082 ops = kvm_device_ops_table[type];
d60eacb0 4083 if (ops == NULL)
852b6d57 4084 return -ENODEV;
852b6d57
SW
4085
4086 if (test)
4087 return 0;
4088
b12ce36a 4089 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
852b6d57
SW
4090 if (!dev)
4091 return -ENOMEM;
4092
4093 dev->ops = ops;
4094 dev->kvm = kvm;
852b6d57 4095
a28ebea2 4096 mutex_lock(&kvm->lock);
1d487e9b 4097 ret = ops->create(dev, type);
852b6d57 4098 if (ret < 0) {
a28ebea2 4099 mutex_unlock(&kvm->lock);
852b6d57
SW
4100 kfree(dev);
4101 return ret;
4102 }
a28ebea2
CD
4103 list_add(&dev->vm_node, &kvm->devices);
4104 mutex_unlock(&kvm->lock);
852b6d57 4105
023e9fdd
CD
4106 if (ops->init)
4107 ops->init(dev);
4108
cfa39381 4109 kvm_get_kvm(kvm);
24009b05 4110 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
852b6d57 4111 if (ret < 0) {
149487bd 4112 kvm_put_kvm_no_destroy(kvm);
a28ebea2
CD
4113 mutex_lock(&kvm->lock);
4114 list_del(&dev->vm_node);
4115 mutex_unlock(&kvm->lock);
a0f1d21c 4116 ops->destroy(dev);
852b6d57
SW
4117 return ret;
4118 }
4119
852b6d57
SW
4120 cd->fd = ret;
4121 return 0;
4122}
4123
92b591a4
AG
4124static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4125{
4126 switch (arg) {
4127 case KVM_CAP_USER_MEMORY:
4128 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4129 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
92b591a4
AG
4130 case KVM_CAP_INTERNAL_ERROR_DATA:
4131#ifdef CONFIG_HAVE_KVM_MSI
4132 case KVM_CAP_SIGNAL_MSI:
4133#endif
297e2105 4134#ifdef CONFIG_HAVE_KVM_IRQFD
dc9be0fa 4135 case KVM_CAP_IRQFD:
92b591a4
AG
4136 case KVM_CAP_IRQFD_RESAMPLE:
4137#endif
e9ea5069 4138 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
92b591a4 4139 case KVM_CAP_CHECK_EXTENSION_VM:
e5d83c74 4140 case KVM_CAP_ENABLE_CAP_VM:
acd05785 4141 case KVM_CAP_HALT_POLL:
92b591a4 4142 return 1;
4b4357e0 4143#ifdef CONFIG_KVM_MMIO
30422558
PB
4144 case KVM_CAP_COALESCED_MMIO:
4145 return KVM_COALESCED_MMIO_PAGE_OFFSET;
0804c849
PH
4146 case KVM_CAP_COALESCED_PIO:
4147 return 1;
30422558 4148#endif
3c9bd400
JZ
4149#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4150 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4151 return KVM_DIRTY_LOG_MANUAL_CAPS;
4152#endif
92b591a4
AG
4153#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4154 case KVM_CAP_IRQ_ROUTING:
4155 return KVM_MAX_IRQ_ROUTES;
f481b069
PB
4156#endif
4157#if KVM_ADDRESS_SPACE_NUM > 1
4158 case KVM_CAP_MULTI_ADDRESS_SPACE:
4159 return KVM_ADDRESS_SPACE_NUM;
92b591a4 4160#endif
c110ae57
PB
4161 case KVM_CAP_NR_MEMSLOTS:
4162 return KVM_USER_MEM_SLOTS;
fb04a1ed
PX
4163 case KVM_CAP_DIRTY_LOG_RING:
4164#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
4165 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4166#else
4167 return 0;
4168#endif
ce55c049
JZ
4169 case KVM_CAP_BINARY_STATS_FD:
4170 return 1;
92b591a4
AG
4171 default:
4172 break;
4173 }
4174 return kvm_vm_ioctl_check_extension(kvm, arg);
4175}
4176
fb04a1ed
PX
4177static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4178{
4179 int r;
4180
4181 if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4182 return -EINVAL;
4183
 4184 /* the size should be a power of 2 */
4185 if (!size || (size & (size - 1)))
4186 return -EINVAL;
4187
 4188 /* Must be big enough to hold the reserved entries and be at least a page */
4189 if (size < kvm_dirty_ring_get_rsvd_entries() *
4190 sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4191 return -EINVAL;
4192
4193 if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4194 sizeof(struct kvm_dirty_gfn))
4195 return -E2BIG;
4196
 4197 /* We only allow it to be set once */
4198 if (kvm->dirty_ring_size)
4199 return -EINVAL;
4200
4201 mutex_lock(&kvm->lock);
4202
4203 if (kvm->created_vcpus) {
 4204 /* We don't allow changing this value after vCPUs are created */
4205 r = -EINVAL;
4206 } else {
4207 kvm->dirty_ring_size = size;
4208 r = 0;
4209 }
4210
4211 mutex_unlock(&kvm->lock);
4212 return r;
4213}
4214
4215static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4216{
4217 int i;
4218 struct kvm_vcpu *vcpu;
4219 int cleared = 0;
4220
4221 if (!kvm->dirty_ring_size)
4222 return -EINVAL;
4223
4224 mutex_lock(&kvm->slots_lock);
4225
4226 kvm_for_each_vcpu(i, vcpu, kvm)
4227 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4228
4229 mutex_unlock(&kvm->slots_lock);
4230
4231 if (cleared)
4232 kvm_flush_remote_tlbs(kvm);
4233
4234 return cleared;
4235}
4236
e5d83c74
PB
4237int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4238 struct kvm_enable_cap *cap)
4239{
4240 return -EINVAL;
4241}
4242
4243static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4244 struct kvm_enable_cap *cap)
4245{
4246 switch (cap->cap) {
2a31b9db 4247#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3c9bd400
JZ
4248 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4249 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4250
4251 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4252 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4253
4254 if (cap->flags || (cap->args[0] & ~allowed_options))
2a31b9db
PB
4255 return -EINVAL;
4256 kvm->manual_dirty_log_protect = cap->args[0];
4257 return 0;
3c9bd400 4258 }
2a31b9db 4259#endif
acd05785
DM
4260 case KVM_CAP_HALT_POLL: {
4261 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4262 return -EINVAL;
4263
4264 kvm->max_halt_poll_ns = cap->args[0];
4265 return 0;
4266 }
fb04a1ed
PX
4267 case KVM_CAP_DIRTY_LOG_RING:
4268 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
e5d83c74
PB
4269 default:
4270 return kvm_vm_ioctl_enable_cap(kvm, cap);
4271 }
4272}
4273
fcfe1bae
JZ
4274static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
4275 size_t size, loff_t *offset)
4276{
4277 struct kvm *kvm = file->private_data;
4278
4279 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
4280 &kvm_vm_stats_desc[0], &kvm->stat,
4281 sizeof(kvm->stat), user_buffer, size, offset);
4282}
4283
4284static const struct file_operations kvm_vm_stats_fops = {
4285 .read = kvm_vm_stats_read,
4286 .llseek = noop_llseek,
4287};
4288
4289static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
4290{
4291 int fd;
4292 struct file *file;
4293
4294 fd = get_unused_fd_flags(O_CLOEXEC);
4295 if (fd < 0)
4296 return fd;
4297
4298 file = anon_inode_getfile("kvm-vm-stats",
4299 &kvm_vm_stats_fops, kvm, O_RDONLY);
4300 if (IS_ERR(file)) {
4301 put_unused_fd(fd);
4302 return PTR_ERR(file);
4303 }
4304 file->f_mode |= FMODE_PREAD;
4305 fd_install(fd, file);
4306
4307 return fd;
4308}
4309
bccf2150
AK
4310static long kvm_vm_ioctl(struct file *filp,
4311 unsigned int ioctl, unsigned long arg)
4312{
4313 struct kvm *kvm = filp->private_data;
4314 void __user *argp = (void __user *)arg;
1fe779f8 4315 int r;
bccf2150 4316
0b8f1173 4317 if (kvm->mm != current->mm || kvm->vm_bugged)
6d4e4c4f 4318 return -EIO;
bccf2150
AK
4319 switch (ioctl) {
4320 case KVM_CREATE_VCPU:
4321 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
bccf2150 4322 break;
e5d83c74
PB
4323 case KVM_ENABLE_CAP: {
4324 struct kvm_enable_cap cap;
4325
4326 r = -EFAULT;
4327 if (copy_from_user(&cap, argp, sizeof(cap)))
4328 goto out;
4329 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
4330 break;
4331 }
6fc138d2
IE
4332 case KVM_SET_USER_MEMORY_REGION: {
4333 struct kvm_userspace_memory_region kvm_userspace_mem;
4334
4335 r = -EFAULT;
4336 if (copy_from_user(&kvm_userspace_mem, argp,
893bdbf1 4337 sizeof(kvm_userspace_mem)))
6fc138d2
IE
4338 goto out;
4339
47ae31e2 4340 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
6aa8b732
AK
4341 break;
4342 }
4343 case KVM_GET_DIRTY_LOG: {
4344 struct kvm_dirty_log log;
4345
4346 r = -EFAULT;
893bdbf1 4347 if (copy_from_user(&log, argp, sizeof(log)))
6aa8b732 4348 goto out;
2c6f5df9 4349 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6aa8b732
AK
4350 break;
4351 }
2a31b9db
PB
4352#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4353 case KVM_CLEAR_DIRTY_LOG: {
4354 struct kvm_clear_dirty_log log;
4355
4356 r = -EFAULT;
4357 if (copy_from_user(&log, argp, sizeof(log)))
4358 goto out;
4359 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4360 break;
4361 }
4362#endif
4b4357e0 4363#ifdef CONFIG_KVM_MMIO
5f94c174
LV
4364 case KVM_REGISTER_COALESCED_MMIO: {
4365 struct kvm_coalesced_mmio_zone zone;
f95ef0cd 4366
5f94c174 4367 r = -EFAULT;
893bdbf1 4368 if (copy_from_user(&zone, argp, sizeof(zone)))
5f94c174 4369 goto out;
5f94c174 4370 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5f94c174
LV
4371 break;
4372 }
4373 case KVM_UNREGISTER_COALESCED_MMIO: {
4374 struct kvm_coalesced_mmio_zone zone;
f95ef0cd 4375
5f94c174 4376 r = -EFAULT;
893bdbf1 4377 if (copy_from_user(&zone, argp, sizeof(zone)))
5f94c174 4378 goto out;
5f94c174 4379 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5f94c174
LV
4380 break;
4381 }
4382#endif
721eecbf
GH
4383 case KVM_IRQFD: {
4384 struct kvm_irqfd data;
4385
4386 r = -EFAULT;
893bdbf1 4387 if (copy_from_user(&data, argp, sizeof(data)))
721eecbf 4388 goto out;
d4db2935 4389 r = kvm_irqfd(kvm, &data);
721eecbf
GH
4390 break;
4391 }
d34e6b17
GH
4392 case KVM_IOEVENTFD: {
4393 struct kvm_ioeventfd data;
4394
4395 r = -EFAULT;
893bdbf1 4396 if (copy_from_user(&data, argp, sizeof(data)))
d34e6b17
GH
4397 goto out;
4398 r = kvm_ioeventfd(kvm, &data);
4399 break;
4400 }
07975ad3
JK
4401#ifdef CONFIG_HAVE_KVM_MSI
4402 case KVM_SIGNAL_MSI: {
4403 struct kvm_msi msi;
4404
4405 r = -EFAULT;
893bdbf1 4406 if (copy_from_user(&msi, argp, sizeof(msi)))
07975ad3
JK
4407 goto out;
4408 r = kvm_send_userspace_msi(kvm, &msi);
4409 break;
4410 }
23d43cf9
CD
4411#endif
4412#ifdef __KVM_HAVE_IRQ_LINE
4413 case KVM_IRQ_LINE_STATUS:
4414 case KVM_IRQ_LINE: {
4415 struct kvm_irq_level irq_event;
4416
4417 r = -EFAULT;
893bdbf1 4418 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
23d43cf9
CD
4419 goto out;
4420
aa2fbe6d
YZ
4421 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
4422 ioctl == KVM_IRQ_LINE_STATUS);
23d43cf9
CD
4423 if (r)
4424 goto out;
4425
4426 r = -EFAULT;
4427 if (ioctl == KVM_IRQ_LINE_STATUS) {
893bdbf1 4428 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
23d43cf9
CD
4429 goto out;
4430 }
4431
4432 r = 0;
4433 break;
4434 }
73880c80 4435#endif
aa8d5944
AG
4436#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4437 case KVM_SET_GSI_ROUTING: {
4438 struct kvm_irq_routing routing;
4439 struct kvm_irq_routing __user *urouting;
f8c1b85b 4440 struct kvm_irq_routing_entry *entries = NULL;
aa8d5944
AG
4441
4442 r = -EFAULT;
4443 if (copy_from_user(&routing, argp, sizeof(routing)))
4444 goto out;
4445 r = -EINVAL;
5c0aea0e
DH
4446 if (!kvm_arch_can_set_irq_routing(kvm))
4447 goto out;
caf1ff26 4448 if (routing.nr > KVM_MAX_IRQ_ROUTES)
aa8d5944
AG
4449 goto out;
4450 if (routing.flags)
4451 goto out;
f8c1b85b 4452 if (routing.nr) {
f8c1b85b 4453 urouting = argp;
7ec28e26
DE
4454 entries = vmemdup_user(urouting->entries,
4455 array_size(sizeof(*entries),
4456 routing.nr));
4457 if (IS_ERR(entries)) {
4458 r = PTR_ERR(entries);
4459 goto out;
4460 }
f8c1b85b 4461 }
aa8d5944
AG
4462 r = kvm_set_irq_routing(kvm, entries, routing.nr,
4463 routing.flags);
7ec28e26 4464 kvfree(entries);
aa8d5944
AG
4465 break;
4466 }
4467#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
852b6d57
SW
4468 case KVM_CREATE_DEVICE: {
4469 struct kvm_create_device cd;
4470
4471 r = -EFAULT;
4472 if (copy_from_user(&cd, argp, sizeof(cd)))
4473 goto out;
4474
4475 r = kvm_ioctl_create_device(kvm, &cd);
4476 if (r)
4477 goto out;
4478
4479 r = -EFAULT;
4480 if (copy_to_user(argp, &cd, sizeof(cd)))
4481 goto out;
4482
4483 r = 0;
4484 break;
4485 }
92b591a4
AG
4486 case KVM_CHECK_EXTENSION:
4487 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
4488 break;
fb04a1ed
PX
4489 case KVM_RESET_DIRTY_RINGS:
4490 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
4491 break;
fcfe1bae
JZ
4492 case KVM_GET_STATS_FD:
4493 r = kvm_vm_ioctl_get_stats_fd(kvm);
4494 break;
f17abe9a 4495 default:
1fe779f8 4496 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
f17abe9a
AK
4497 }
4498out:
4499 return r;
4500}
4501
de8e5d74 4502#ifdef CONFIG_KVM_COMPAT
6ff5894c
AB
4503struct compat_kvm_dirty_log {
4504 __u32 slot;
4505 __u32 padding1;
4506 union {
4507 compat_uptr_t dirty_bitmap; /* one bit per page */
4508 __u64 padding2;
4509 };
4510};
4511
8750f9bb
PB
4512struct compat_kvm_clear_dirty_log {
4513 __u32 slot;
4514 __u32 num_pages;
4515 __u64 first_page;
4516 union {
4517 compat_uptr_t dirty_bitmap; /* one bit per page */
4518 __u64 padding2;
4519 };
4520};
4521
6ff5894c
AB
4522static long kvm_vm_compat_ioctl(struct file *filp,
4523 unsigned int ioctl, unsigned long arg)
4524{
4525 struct kvm *kvm = filp->private_data;
4526 int r;
4527
0b8f1173 4528 if (kvm->mm != current->mm || kvm->vm_bugged)
6ff5894c
AB
4529 return -EIO;
4530 switch (ioctl) {
8750f9bb
PB
4531#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4532 case KVM_CLEAR_DIRTY_LOG: {
4533 struct compat_kvm_clear_dirty_log compat_log;
4534 struct kvm_clear_dirty_log log;
4535
4536 if (copy_from_user(&compat_log, (void __user *)arg,
4537 sizeof(compat_log)))
4538 return -EFAULT;
4539 log.slot = compat_log.slot;
4540 log.num_pages = compat_log.num_pages;
4541 log.first_page = compat_log.first_page;
4542 log.padding2 = compat_log.padding2;
4543 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4544
4545 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4546 break;
4547 }
4548#endif
6ff5894c
AB
4549 case KVM_GET_DIRTY_LOG: {
4550 struct compat_kvm_dirty_log compat_log;
4551 struct kvm_dirty_log log;
4552
6ff5894c
AB
4553 if (copy_from_user(&compat_log, (void __user *)arg,
4554 sizeof(compat_log)))
f6a3b168 4555 return -EFAULT;
6ff5894c
AB
4556 log.slot = compat_log.slot;
4557 log.padding1 = compat_log.padding1;
4558 log.padding2 = compat_log.padding2;
4559 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4560
4561 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6ff5894c
AB
4562 break;
4563 }
4564 default:
4565 r = kvm_vm_ioctl(filp, ioctl, arg);
4566 }
6ff5894c
AB
4567 return r;
4568}
4569#endif
4570
3d3aab1b 4571static struct file_operations kvm_vm_fops = {
f17abe9a
AK
4572 .release = kvm_vm_release,
4573 .unlocked_ioctl = kvm_vm_ioctl,
6038f373 4574 .llseek = noop_llseek,
7ddfd3e0 4575 KVM_COMPAT(kvm_vm_compat_ioctl),
f17abe9a
AK
4576};
4577
54526d1f
NT
4578bool file_is_kvm(struct file *file)
4579{
4580 return file && file->f_op == &kvm_vm_fops;
4581}
4582EXPORT_SYMBOL_GPL(file_is_kvm);
4583
e08b9637 4584static int kvm_dev_ioctl_create_vm(unsigned long type)
f17abe9a 4585{
aac87636 4586 int r;
f17abe9a 4587 struct kvm *kvm;
506cfba9 4588 struct file *file;
f17abe9a 4589
e08b9637 4590 kvm = kvm_create_vm(type);
d6d28168
AK
4591 if (IS_ERR(kvm))
4592 return PTR_ERR(kvm);
4b4357e0 4593#ifdef CONFIG_KVM_MMIO
6ce5a090 4594 r = kvm_coalesced_mmio_init(kvm);
78588335
ME
4595 if (r < 0)
4596 goto put_kvm;
6ce5a090 4597#endif
506cfba9 4598 r = get_unused_fd_flags(O_CLOEXEC);
78588335
ME
4599 if (r < 0)
4600 goto put_kvm;
4601
fcfe1bae
JZ
4602 snprintf(kvm->stats_id, sizeof(kvm->stats_id),
4603 "kvm-%d", task_pid_nr(current));
4604
506cfba9
AV
4605 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4606 if (IS_ERR(file)) {
4607 put_unused_fd(r);
78588335
ME
4608 r = PTR_ERR(file);
4609 goto put_kvm;
506cfba9 4610 }
536a6f88 4611
525df861
PB
4612 /*
4613 * Don't call kvm_put_kvm anymore at this point; file->f_op is
4614 * already set, with ->release() being kvm_vm_release(). In error
4615 * cases it will be called by the final fput(file) and will take
4616 * care of doing kvm_put_kvm(kvm).
4617 */
536a6f88 4618 if (kvm_create_vm_debugfs(kvm, r) < 0) {
506cfba9
AV
4619 put_unused_fd(r);
4620 fput(file);
536a6f88
JF
4621 return -ENOMEM;
4622 }
286de8f6 4623 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
f17abe9a 4624
506cfba9 4625 fd_install(r, file);
aac87636 4626 return r;
78588335
ME
4627
4628put_kvm:
4629 kvm_put_kvm(kvm);
4630 return r;
f17abe9a
AK
4631}
4632
4633static long kvm_dev_ioctl(struct file *filp,
4634 unsigned int ioctl, unsigned long arg)
4635{
07c45a36 4636 long r = -EINVAL;
f17abe9a
AK
4637
4638 switch (ioctl) {
4639 case KVM_GET_API_VERSION:
f0fe5108
AK
4640 if (arg)
4641 goto out;
f17abe9a
AK
4642 r = KVM_API_VERSION;
4643 break;
4644 case KVM_CREATE_VM:
e08b9637 4645 r = kvm_dev_ioctl_create_vm(arg);
f17abe9a 4646 break;
018d00d2 4647 case KVM_CHECK_EXTENSION:
784aa3d7 4648 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5d308f45 4649 break;
07c45a36 4650 case KVM_GET_VCPU_MMAP_SIZE:
07c45a36
AK
4651 if (arg)
4652 goto out;
adb1ff46
AK
4653 r = PAGE_SIZE; /* struct kvm_run */
4654#ifdef CONFIG_X86
4655 r += PAGE_SIZE; /* pio data page */
5f94c174 4656#endif
4b4357e0 4657#ifdef CONFIG_KVM_MMIO
5f94c174 4658 r += PAGE_SIZE; /* coalesced mmio ring page */
adb1ff46 4659#endif
07c45a36 4660 break;
d4c9ff2d
FEL
4661 case KVM_TRACE_ENABLE:
4662 case KVM_TRACE_PAUSE:
4663 case KVM_TRACE_DISABLE:
2023a29c 4664 r = -EOPNOTSUPP;
d4c9ff2d 4665 break;
6aa8b732 4666 default:
043405e1 4667 return kvm_arch_dev_ioctl(filp, ioctl, arg);
6aa8b732
AK
4668 }
4669out:
4670 return r;
4671}
4672
6aa8b732 4673static struct file_operations kvm_chardev_ops = {
6aa8b732 4674 .unlocked_ioctl = kvm_dev_ioctl,
6038f373 4675 .llseek = noop_llseek,
7ddfd3e0 4676 KVM_COMPAT(kvm_dev_ioctl),
6aa8b732
AK
4677};
4678
4679static struct miscdevice kvm_dev = {
bbe4432e 4680 KVM_MINOR,
6aa8b732
AK
4681 "kvm",
4682 &kvm_chardev_ops,
4683};
4684
75b7127c 4685static void hardware_enable_nolock(void *junk)
1b6c0168
AK
4686{
4687 int cpu = raw_smp_processor_id();
10474ae8 4688 int r;
1b6c0168 4689
7f59f492 4690 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 4691 return;
10474ae8 4692
7f59f492 4693 cpumask_set_cpu(cpu, cpus_hardware_enabled);
10474ae8 4694
13a34e06 4695 r = kvm_arch_hardware_enable();
10474ae8
AG
4696
4697 if (r) {
4698 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4699 atomic_inc(&hardware_enable_failed);
1170adc6 4700 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
10474ae8 4701 }
1b6c0168
AK
4702}
4703
8c18b2d2 4704static int kvm_starting_cpu(unsigned int cpu)
75b7127c 4705{
4a937f96 4706 raw_spin_lock(&kvm_count_lock);
4fa92fb2
PB
4707 if (kvm_usage_count)
4708 hardware_enable_nolock(NULL);
4a937f96 4709 raw_spin_unlock(&kvm_count_lock);
8c18b2d2 4710 return 0;
75b7127c
TY
4711}
4712
4713static void hardware_disable_nolock(void *junk)
1b6c0168
AK
4714{
4715 int cpu = raw_smp_processor_id();
4716
7f59f492 4717 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 4718 return;
7f59f492 4719 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
13a34e06 4720 kvm_arch_hardware_disable();
1b6c0168
AK
4721}
4722
8c18b2d2 4723static int kvm_dying_cpu(unsigned int cpu)
75b7127c 4724{
4a937f96 4725 raw_spin_lock(&kvm_count_lock);
4fa92fb2
PB
4726 if (kvm_usage_count)
4727 hardware_disable_nolock(NULL);
4a937f96 4728 raw_spin_unlock(&kvm_count_lock);
8c18b2d2 4729 return 0;
75b7127c
TY
4730}
4731
10474ae8
AG
4732static void hardware_disable_all_nolock(void)
4733{
4734 BUG_ON(!kvm_usage_count);
4735
4736 kvm_usage_count--;
4737 if (!kvm_usage_count)
75b7127c 4738 on_each_cpu(hardware_disable_nolock, NULL, 1);
10474ae8
AG
4739}
4740
4741static void hardware_disable_all(void)
4742{
4a937f96 4743 raw_spin_lock(&kvm_count_lock);
10474ae8 4744 hardware_disable_all_nolock();
4a937f96 4745 raw_spin_unlock(&kvm_count_lock);
10474ae8
AG
4746}
4747
4748static int hardware_enable_all(void)
4749{
4750 int r = 0;
4751
4a937f96 4752 raw_spin_lock(&kvm_count_lock);
10474ae8
AG
4753
4754 kvm_usage_count++;
4755 if (kvm_usage_count == 1) {
4756 atomic_set(&hardware_enable_failed, 0);
75b7127c 4757 on_each_cpu(hardware_enable_nolock, NULL, 1);
10474ae8
AG
4758
4759 if (atomic_read(&hardware_enable_failed)) {
4760 hardware_disable_all_nolock();
4761 r = -EBUSY;
4762 }
4763 }
4764
4a937f96 4765 raw_spin_unlock(&kvm_count_lock);
10474ae8
AG
4766
4767 return r;
4768}
4769
9a2b85c6 4770static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
d77c26fc 4771 void *v)
9a2b85c6 4772{
8e1c1815
SY
4773 /*
4774 * Some (well, at least mine) BIOSes hang on reboot if
4775 * in vmx root mode.
4776 *
4777 * And Intel TXT required VMX off for all cpu when system shutdown.
4778 */
1170adc6 4779 pr_info("kvm: exiting hardware virtualization\n");
8e1c1815 4780 kvm_rebooting = true;
75b7127c 4781 on_each_cpu(hardware_disable_nolock, NULL, 1);
9a2b85c6
RR
4782 return NOTIFY_OK;
4783}
4784
4785static struct notifier_block kvm_reboot_notifier = {
4786 .notifier_call = kvm_reboot,
4787 .priority = 0,
4788};
4789
e93f8a0f 4790static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2eeb2e94
GH
4791{
4792 int i;
4793
4794 for (i = 0; i < bus->dev_count; i++) {
743eeb0b 4795 struct kvm_io_device *pos = bus->range[i].dev;
2eeb2e94
GH
4796
4797 kvm_iodevice_destructor(pos);
4798 }
e93f8a0f 4799 kfree(bus);
2eeb2e94
GH
4800}
4801
c21fbff1 4802static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
20e87b72 4803 const struct kvm_io_range *r2)
743eeb0b 4804{
8f4216c7
JW
4805 gpa_t addr1 = r1->addr;
4806 gpa_t addr2 = r2->addr;
4807
4808 if (addr1 < addr2)
743eeb0b 4809 return -1;
8f4216c7
JW
4810
4811 /* If r2->len == 0, match the exact address. If r2->len != 0,
4812 * accept any overlapping write. Any order is acceptable for
4813 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
4814 * we process all of them.
4815 */
4816 if (r2->len) {
4817 addr1 += r1->len;
4818 addr2 += r2->len;
4819 }
4820
4821 if (addr1 > addr2)
743eeb0b 4822 return 1;
8f4216c7 4823
743eeb0b
SL
4824 return 0;
4825}
4826
a343c9b7
PB
4827static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
4828{
c21fbff1 4829 return kvm_io_bus_cmp(p1, p2);
a343c9b7
PB
4830}
4831
39369f7a 4832static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
743eeb0b
SL
4833 gpa_t addr, int len)
4834{
4835 struct kvm_io_range *range, key;
4836 int off;
4837
4838 key = (struct kvm_io_range) {
4839 .addr = addr,
4840 .len = len,
4841 };
4842
4843 range = bsearch(&key, bus->range, bus->dev_count,
4844 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
4845 if (range == NULL)
4846 return -ENOENT;
4847
4848 off = range - bus->range;
4849
c21fbff1 4850 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
743eeb0b
SL
4851 off--;
4852
4853 return off;
4854}
4855
e32edf4f 4856static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
126a5af5
CH
4857 struct kvm_io_range *range, const void *val)
4858{
4859 int idx;
4860
4861 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4862 if (idx < 0)
4863 return -EOPNOTSUPP;
4864
4865 while (idx < bus->dev_count &&
c21fbff1 4866 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
e32edf4f 4867 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
126a5af5
CH
4868 range->len, val))
4869 return idx;
4870 idx++;
4871 }
4872
4873 return -EOPNOTSUPP;
4874}
4875
bda9020e 4876/* kvm_io_bus_write - called under kvm->slots_lock */
e32edf4f 4877int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
bda9020e 4878 int len, const void *val)
2eeb2e94 4879{
90d83dc3 4880 struct kvm_io_bus *bus;
743eeb0b 4881 struct kvm_io_range range;
126a5af5 4882 int r;
743eeb0b
SL
4883
4884 range = (struct kvm_io_range) {
4885 .addr = addr,
4886 .len = len,
4887 };
90d83dc3 4888
e32edf4f 4889 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
90db1043
DH
4890 if (!bus)
4891 return -ENOMEM;
e32edf4f 4892 r = __kvm_io_bus_write(vcpu, bus, &range, val);
126a5af5
CH
4893 return r < 0 ? r : 0;
4894}
a2420107 4895EXPORT_SYMBOL_GPL(kvm_io_bus_write);
126a5af5
CH
4896
4897/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
e32edf4f
NN
4898int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
4899 gpa_t addr, int len, const void *val, long cookie)
126a5af5
CH
4900{
4901 struct kvm_io_bus *bus;
4902 struct kvm_io_range range;
4903
4904 range = (struct kvm_io_range) {
4905 .addr = addr,
4906 .len = len,
4907 };
4908
e32edf4f 4909 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
90db1043
DH
4910 if (!bus)
4911 return -ENOMEM;
126a5af5
CH
4912
4913 /* First try the device referenced by cookie. */
4914 if ((cookie >= 0) && (cookie < bus->dev_count) &&
c21fbff1 4915 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
e32edf4f 4916 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
126a5af5
CH
4917 val))
4918 return cookie;
4919
4920 /*
4921 * cookie contained garbage; fall back to search and return the
4922 * correct cookie value.
4923 */
e32edf4f 4924 return __kvm_io_bus_write(vcpu, bus, &range, val);
126a5af5
CH
4925}
4926
e32edf4f
NN
4927static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4928 struct kvm_io_range *range, void *val)
126a5af5
CH
4929{
4930 int idx;
4931
4932 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
743eeb0b
SL
4933 if (idx < 0)
4934 return -EOPNOTSUPP;
4935
4936 while (idx < bus->dev_count &&
c21fbff1 4937 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
e32edf4f 4938 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
126a5af5
CH
4939 range->len, val))
4940 return idx;
743eeb0b
SL
4941 idx++;
4942 }
4943
bda9020e
MT
4944 return -EOPNOTSUPP;
4945}
2eeb2e94 4946
bda9020e 4947/* kvm_io_bus_read - called under kvm->slots_lock */
e32edf4f 4948int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
e93f8a0f 4949 int len, void *val)
bda9020e 4950{
90d83dc3 4951 struct kvm_io_bus *bus;
743eeb0b 4952 struct kvm_io_range range;
126a5af5 4953 int r;
743eeb0b
SL
4954
4955 range = (struct kvm_io_range) {
4956 .addr = addr,
4957 .len = len,
4958 };
e93f8a0f 4959
e32edf4f 4960 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
90db1043
DH
4961 if (!bus)
4962 return -ENOMEM;
e32edf4f 4963 r = __kvm_io_bus_read(vcpu, bus, &range, val);
126a5af5
CH
4964 return r < 0 ? r : 0;
4965}
743eeb0b 4966
79fac95e 4967/* Caller must hold slots_lock. */
743eeb0b
SL
4968int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
4969 int len, struct kvm_io_device *dev)
6c474694 4970{
d4c67a7a 4971 int i;
e93f8a0f 4972 struct kvm_io_bus *new_bus, *bus;
d4c67a7a 4973 struct kvm_io_range range;
090b7aff 4974
4a12f951 4975 bus = kvm_get_bus(kvm, bus_idx);
90db1043
DH
4976 if (!bus)
4977 return -ENOMEM;
4978
6ea34c9b
AK
4979 /* exclude ioeventfd which is limited by maximum fd */
4980 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
090b7aff 4981 return -ENOSPC;
2eeb2e94 4982
90952cd3 4983 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
b12ce36a 4984 GFP_KERNEL_ACCOUNT);
e93f8a0f
MT
4985 if (!new_bus)
4986 return -ENOMEM;
d4c67a7a
GH
4987
4988 range = (struct kvm_io_range) {
4989 .addr = addr,
4990 .len = len,
4991 .dev = dev,
4992 };
4993
4994 for (i = 0; i < bus->dev_count; i++)
4995 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
4996 break;
4997
4998 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
4999 new_bus->dev_count++;
5000 new_bus->range[i] = range;
5001 memcpy(new_bus->range + i + 1, bus->range + i,
5002 (bus->dev_count - i) * sizeof(struct kvm_io_range));
e93f8a0f
MT
5003 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5004 synchronize_srcu_expedited(&kvm->srcu);
5005 kfree(bus);
090b7aff
GH
5006
5007 return 0;
5008}
5009
5d3c4c79
SC
5010int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5011 struct kvm_io_device *dev)
090b7aff 5012{
f6588660 5013 int i, j;
e93f8a0f 5014 struct kvm_io_bus *new_bus, *bus;
090b7aff 5015
7c896d37
SC
5016 lockdep_assert_held(&kvm->slots_lock);
5017
4a12f951 5018 bus = kvm_get_bus(kvm, bus_idx);
df630b8c 5019 if (!bus)
5d3c4c79 5020 return 0;
df630b8c 5021
7c896d37 5022 for (i = 0; i < bus->dev_count; i++) {
a1300716 5023 if (bus->range[i].dev == dev) {
090b7aff
GH
5024 break;
5025 }
7c896d37 5026 }
e93f8a0f 5027
90db1043 5028 if (i == bus->dev_count)
5d3c4c79 5029 return 0;
a1300716 5030
90952cd3 5031 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
b12ce36a 5032 GFP_KERNEL_ACCOUNT);
f6588660 5033 if (new_bus) {
871c433b 5034 memcpy(new_bus, bus, struct_size(bus, range, i));
f6588660
RK
5035 new_bus->dev_count--;
5036 memcpy(new_bus->range + i, bus->range + i + 1,
871c433b 5037 flex_array_size(new_bus, range, new_bus->dev_count - i));
2ee37574
SC
5038 }
5039
5040 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5041 synchronize_srcu_expedited(&kvm->srcu);
5042
5043 /* Destroy the old bus _after_ installing the (null) bus. */
5044 if (!new_bus) {
90db1043 5045 pr_err("kvm: failed to shrink bus, removing it completely\n");
f6588660
RK
5046 for (j = 0; j < bus->dev_count; j++) {
5047 if (j == i)
5048 continue;
5049 kvm_iodevice_destructor(bus->range[j].dev);
5050 }
90db1043 5051 }
a1300716 5052
e93f8a0f 5053 kfree(bus);
5d3c4c79 5054 return new_bus ? 0 : -ENOMEM;
2eeb2e94
GH
5055}
5056
8a39d006
AP
5057struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5058 gpa_t addr)
5059{
5060 struct kvm_io_bus *bus;
5061 int dev_idx, srcu_idx;
5062 struct kvm_io_device *iodev = NULL;
5063
5064 srcu_idx = srcu_read_lock(&kvm->srcu);
5065
5066 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
90db1043
DH
5067 if (!bus)
5068 goto out_unlock;
8a39d006
AP
5069
5070 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
5071 if (dev_idx < 0)
5072 goto out_unlock;
5073
5074 iodev = bus->range[dev_idx].dev;
5075
5076out_unlock:
5077 srcu_read_unlock(&kvm->srcu, srcu_idx);
5078
5079 return iodev;
5080}
5081EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
5082
536a6f88
JF
5083static int kvm_debugfs_open(struct inode *inode, struct file *file,
5084 int (*get)(void *, u64 *), int (*set)(void *, u64),
5085 const char *fmt)
5086{
5087 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5088 inode->i_private;
5089
605c7130
PX
5090 /*
 5091 * The debugfs files hold a reference to the kvm struct, which is
 5092 * still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
5093 * avoids the race between open and the removal of the debugfs directory.
536a6f88 5094 */
605c7130 5095 if (!kvm_get_kvm_safe(stat_data->kvm))
536a6f88
JF
5096 return -ENOENT;
5097
833b45de 5098 if (simple_attr_open(inode, file, get,
bc9e9e67 5099 kvm_stats_debugfs_mode(stat_data->desc) & 0222
09cbcef6
MP
5100 ? set : NULL,
5101 fmt)) {
536a6f88
JF
5102 kvm_put_kvm(stat_data->kvm);
5103 return -ENOMEM;
5104 }
5105
5106 return 0;
5107}
5108
5109static int kvm_debugfs_release(struct inode *inode, struct file *file)
5110{
5111 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5112 inode->i_private;
5113
5114 simple_attr_release(inode, file);
5115 kvm_put_kvm(stat_data->kvm);
5116
5117 return 0;
5118}
5119
09cbcef6 5120static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
536a6f88 5121{
bc9e9e67 5122 *val = *(u64 *)((void *)(&kvm->stat) + offset);
536a6f88 5123
09cbcef6
MP
5124 return 0;
5125}
5126
5127static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
5128{
bc9e9e67 5129 *(u64 *)((void *)(&kvm->stat) + offset) = 0;
536a6f88
JF
5130
5131 return 0;
5132}
5133
09cbcef6 5134static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
ce35ef27 5135{
09cbcef6
MP
5136 int i;
5137 struct kvm_vcpu *vcpu;
ce35ef27 5138
09cbcef6 5139 *val = 0;
ce35ef27 5140
09cbcef6 5141 kvm_for_each_vcpu(i, vcpu, kvm)
bc9e9e67 5142 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
ce35ef27
SJS
5143
5144 return 0;
5145}
5146
09cbcef6 5147static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
536a6f88 5148{
09cbcef6
MP
5149 int i;
5150 struct kvm_vcpu *vcpu;
536a6f88 5151
09cbcef6 5152 kvm_for_each_vcpu(i, vcpu, kvm)
bc9e9e67 5153 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
09cbcef6
MP
5154
5155 return 0;
5156}
536a6f88 5157
09cbcef6 5158static int kvm_stat_data_get(void *data, u64 *val)
536a6f88 5159{
09cbcef6 5160 int r = -EFAULT;
536a6f88 5161 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
536a6f88 5162
bc9e9e67 5163 switch (stat_data->kind) {
09cbcef6
MP
5164 case KVM_STAT_VM:
5165 r = kvm_get_stat_per_vm(stat_data->kvm,
bc9e9e67 5166 stat_data->desc->desc.offset, val);
09cbcef6
MP
5167 break;
5168 case KVM_STAT_VCPU:
5169 r = kvm_get_stat_per_vcpu(stat_data->kvm,
bc9e9e67 5170 stat_data->desc->desc.offset, val);
09cbcef6
MP
5171 break;
5172 }
536a6f88 5173
09cbcef6 5174 return r;
536a6f88
JF
5175}
5176
09cbcef6 5177static int kvm_stat_data_clear(void *data, u64 val)
ce35ef27 5178{
09cbcef6 5179 int r = -EFAULT;
ce35ef27 5180 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
ce35ef27
SJS
5181
5182 if (val)
5183 return -EINVAL;
5184
bc9e9e67 5185 switch (stat_data->kind) {
09cbcef6
MP
5186 case KVM_STAT_VM:
5187 r = kvm_clear_stat_per_vm(stat_data->kvm,
bc9e9e67 5188 stat_data->desc->desc.offset);
09cbcef6
MP
5189 break;
5190 case KVM_STAT_VCPU:
5191 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
bc9e9e67 5192 stat_data->desc->desc.offset);
09cbcef6
MP
5193 break;
5194 }
ce35ef27 5195
09cbcef6 5196 return r;
ce35ef27
SJS
5197}
5198
09cbcef6 5199static int kvm_stat_data_open(struct inode *inode, struct file *file)
536a6f88
JF
5200{
5201 __simple_attr_check_format("%llu\n", 0ull);
09cbcef6
MP
5202 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
5203 kvm_stat_data_clear, "%llu\n");
536a6f88
JF
5204}
5205
09cbcef6
MP
5206static const struct file_operations stat_fops_per_vm = {
5207 .owner = THIS_MODULE,
5208 .open = kvm_stat_data_open,
536a6f88 5209 .release = kvm_debugfs_release,
09cbcef6
MP
5210 .read = simple_attr_read,
5211 .write = simple_attr_write,
5212 .llseek = no_llseek,
536a6f88
JF
5213};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	u64 tmp_val;

	*val = 0;
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
		*val += tmp_val;
	}
	mutex_unlock(&kvm_lock);
	return 0;
}

static int vm_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	if (val)
		return -EINVAL;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_clear_stat_per_vm(kvm, offset);
	}
	mutex_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	u64 tmp_val;

	*val = 0;
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
		*val += tmp_val;
	}
	mutex_unlock(&kvm_lock);
	return 0;
}

static int vcpu_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	if (val)
		return -EINVAL;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_clear_stat_per_vcpu(kvm, offset);
	}
	mutex_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
			"%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");

static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
	struct kobj_uevent_env *env;
	unsigned long long created, active;

	if (!kvm_dev.this_device || !kvm)
		return;

	mutex_lock(&kvm_lock);
	if (type == KVM_EVENT_CREATE_VM) {
		kvm_createvm_count++;
		kvm_active_vms++;
	} else if (type == KVM_EVENT_DESTROY_VM) {
		kvm_active_vms--;
	}
	created = kvm_createvm_count;
	active = kvm_active_vms;
	mutex_unlock(&kvm_lock);

	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
	if (!env)
		return;

	add_uevent_var(env, "CREATED=%llu", created);
	add_uevent_var(env, "COUNT=%llu", active);

	if (type == KVM_EVENT_CREATE_VM) {
		add_uevent_var(env, "EVENT=create");
		kvm->userspace_pid = task_pid_nr(current);
	} else if (type == KVM_EVENT_DESTROY_VM) {
		add_uevent_var(env, "EVENT=destroy");
	}
	add_uevent_var(env, "PID=%d", kvm->userspace_pid);

	if (kvm->debugfs_dentry) {
		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);

		if (p) {
			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
			if (!IS_ERR(tmp))
				add_uevent_var(env, "STATS_PATH=%s", tmp);
			kfree(p);
		}
	}
	/* no need for error checks, since we add at most 5 keys */
	env->envp[env->envp_idx++] = NULL;
	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
	kfree(env);
}
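
/*
 * Illustrative sketch, not part of kvm_main.c: the KOBJ_CHANGE uevent sent
 * above carries the CREATED/COUNT/EVENT/PID (and, when available, STATS_PATH)
 * variables.  A standalone userspace listener on the kernel uevent netlink
 * multicast group might look like this; it is an example, not an interface
 * KVM itself provides.
 */
#if 0	/* userspace example, compiled separately */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	char *p;
	ssize_t len;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		/* A datagram is "ACTION@devpath" plus KEY=VALUE pairs, NUL-separated. */
		len = recv(fd, buf, sizeof(buf) - 1, 0);
		if (len <= 0)
			continue;
		buf[len] = '\0';
		for (p = buf; p < buf + len; p += strlen(p) + 1)
			if (!strncmp(p, "EVENT=", 6) || !strncmp(p, "CREATED=", 8) ||
			    !strncmp(p, "COUNT=", 6) || !strncmp(p, "PID=", 4))
				printf("%s\n", p);
	}
}
#endif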

static void kvm_init_debug(void)
{
	const struct file_operations *fops;
	const struct _kvm_stats_desc *pdesc;
	int i;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);

	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
		pdesc = &kvm_vm_stats_desc[i];
		if (kvm_stats_debugfs_mode(pdesc) & 0222)
			fops = &vm_stat_fops;
		else
			fops = &vm_stat_readonly_fops;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				kvm_debugfs_dir,
				(void *)(long)pdesc->desc.offset, fops);
	}

	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
		pdesc = &kvm_vcpu_stats_desc[i];
		if (kvm_stats_debugfs_mode(pdesc) & 0222)
			fops = &vcpu_stat_fops;
		else
			fops = &vcpu_stat_readonly_fops;
		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
				kvm_debugfs_dir,
				(void *)(long)pdesc->desc.offset, fops);
	}
}

static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
#ifdef CONFIG_LOCKDEP
		WARN_ON(lockdep_is_held(&kvm_count_lock));
#endif
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	WRITE_ONCE(vcpu->preempted, false);
	WRITE_ONCE(vcpu->ready, false);

	__this_cpu_write(kvm_running_vcpu, vcpu);
	kvm_arch_sched_in(vcpu, cpu);
	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (current->on_rq) {
		WRITE_ONCE(vcpu->preempted, true);
		WRITE_ONCE(vcpu->ready, true);
	}
	kvm_arch_vcpu_put(vcpu);
	__this_cpu_write(kvm_running_vcpu, NULL);
}

/**
 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value: the preempt notifier
 * handlers update the per-CPU variable whenever a vcpu thread is scheduled
 * in or out.
 */
struct kvm_vcpu *kvm_get_running_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = __this_cpu_read(kvm_running_vcpu);
	preempt_enable();

	return vcpu;
}
EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
	return &kvm_running_vcpu;
}

struct kvm_cpu_compat_check {
	void *opaque;
	int *ret;
};

static void check_processor_compat(void *data)
{
	struct kvm_cpu_compat_check *c = data;

	*c->ret = kvm_arch_check_processor_compat(c->opaque);
}

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	struct kvm_cpu_compat_check c;
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like Intel and AMD on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already set up for another implementation.
	 */
	r = kvm_irqfd_init();
	if (r)
		goto out_irqfd;

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup(opaque);
	if (r < 0)
		goto out_free_1;

	c.ret = &r;
	c.opaque = opaque;
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_processor_compat, &c, 1);
		if (r < 0)
			goto out_free_2;
	}

	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
				      kvm_starting_cpu, kvm_dying_cpu);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache =
		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
					   SLAB_ACCOUNT,
					   offsetof(struct kvm_vcpu, arch),
					   offsetofend(struct kvm_vcpu, stats_id)
					   - offsetof(struct kvm_vcpu, arch),
					   NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		pr_err("kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	r = kvm_vfio_ops_init();
	WARN_ON(r);

	return 0;

out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
out_free_2:
	kvm_arch_hardware_unsetup();
out_free_1:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	kvm_irqfd_exit();
out_irqfd:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	debugfs_remove_recursive(kvm_debugfs_dir);
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_irqfd_exit();
	free_cpumask_var(cpus_hardware_enabled);
	kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
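
/*
 * Illustrative sketch, not part of kvm_main.c: an architecture module is
 * expected to call kvm_init() from its module_init() with its opaque init
 * data, the size/alignment of its vcpu container and its own struct module,
 * and to call kvm_exit() from module_exit().  The names example_vcpu,
 * example_init_opaque and example_module_* below are placeholders, not real
 * KVM symbols.
 */
#if 0	/* illustrative only */
struct example_vcpu {
	struct kvm_vcpu	vcpu;	/* conventionally placed first, as on x86 */
	/* vendor-specific state would follow */
};

static void *example_init_opaque;	/* passed through to the kvm_arch_*() hooks */

static int __init example_module_init(void)
{
	return kvm_init(example_init_opaque, sizeof(struct example_vcpu),
			__alignof__(struct example_vcpu), THIS_MODULE);
}

static void __exit example_module_exit(void)
{
	kvm_exit();
}

module_init(example_module_init);
module_exit(example_module_exit);
#endif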

struct kvm_vm_worker_thread_context {
	struct kvm *kvm;
	struct task_struct *parent;
	struct completion init_done;
	kvm_vm_thread_fn_t thread_fn;
	uintptr_t data;
	int err;
};

static int kvm_vm_worker_thread(void *context)
{
	/*
	 * The init_context is allocated on the stack of the parent thread, so
	 * we have to locally copy anything that is needed beyond initialization.
	 */
	struct kvm_vm_worker_thread_context *init_context = context;
	struct kvm *kvm = init_context->kvm;
	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
	uintptr_t data = init_context->data;
	int err;

	err = kthread_park(current);
	/* kthread_park(current) is never supposed to return an error */
	WARN_ON(err != 0);
	if (err)
		goto init_complete;

	err = cgroup_attach_task_all(init_context->parent, current);
	if (err) {
		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
			__func__, err);
		goto init_complete;
	}

	set_user_nice(current, task_nice(init_context->parent));

init_complete:
	init_context->err = err;
	complete(&init_context->init_done);
	init_context = NULL;

	if (err)
		return err;

	/* Wait to be woken up by the spawner before proceeding. */
	kthread_parkme();

	if (!kthread_should_stop())
		err = thread_fn(kvm, data);

	return err;
}

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr)
{
	struct kvm_vm_worker_thread_context init_context = {};
	struct task_struct *thread;

	*thread_ptr = NULL;
	init_context.kvm = kvm;
	init_context.parent = current;
	init_context.thread_fn = thread_fn;
	init_context.data = data;
	init_completion(&init_context.init_done);

	thread = kthread_run(kvm_vm_worker_thread, &init_context,
			     "%s-%d", name, task_pid_nr(current));
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	/* kthread_run is never supposed to return NULL */
	WARN_ON(thread == NULL);

	wait_for_completion(&init_context.init_done);

	if (!init_context.err)
		*thread_ptr = thread;

	return init_context.err;
}
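
/*
 * Illustrative sketch, not part of kvm_main.c: a caller hands
 * kvm_vm_create_worker_thread() a thread function and an opaque data word;
 * the new kthread parks itself once initialization is done, so the caller
 * must unpark it before thread_fn starts running (and kthread_stop() it on
 * VM teardown).  The example_* names below are placeholders; the in-tree
 * user of this API at the time of writing is the x86 NX huge-page recovery
 * worker.
 */
#if 0	/* illustrative only */
static int example_worker_fn(struct kvm *kvm, uintptr_t data)
{
	while (!kthread_should_stop()) {
		/* periodic per-VM maintenance work would go here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int example_start_worker(struct kvm *kvm)
{
	struct task_struct *thread;
	int err;

	err = kvm_vm_create_worker_thread(kvm, example_worker_fn, 0,
					  "example-worker", &thread);
	if (err)
		return err;

	/* The worker parked itself after init; unpark it to run thread_fn. */
	kthread_unpark(thread);
	return 0;
}
#endif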