#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#if IS_ENABLED(CONFIG_KVM)

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the remaining bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

/*
 * For a normal pfn the highest 12 bits are zero, so we can use
 * bits 52-62 to indicate an error pfn and bit 63 to indicate a
 * noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host failed
 * to translate it to a pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not backed by any slot, or the translation failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

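/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how a caller is expected to test the pfn encoding described above.  It
 * assumes a pfn obtained from gfn_to_pfn(), which is declared later in this
 * file; the function name is invented for the example.
 */
#if 0
static void pfn_check_example(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_noslot_pfn(pfn)) {
		/* bit 63 set: the gfn is not covered by any memslot */
	} else if (is_error_pfn(pfn)) {
		/* bits 52-62 set: a slot exists, but host translation failed */
	} else {
		/* a real pfn; drop the reference when done */
		kvm_release_pfn_clean(pfn);
	}
}
#endif
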
#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17
#define KVM_REQ_WATCHDOG          18
#define KVM_REQ_MASTERCLOCK_UPDATE 19
#define KVM_REQ_MCLOCK_INPROGRESS 20
#define KVM_REQ_EPR_EXIT          21
#define KVM_REQ_EOIBITMAP         22

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

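/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * dispatching an emulated MMIO write through the bus declared above.  The
 * address and payload are invented for the example.
 */
#if 0
static void io_bus_write_example(struct kvm *kvm)
{
	u32 val = 0xdeadbeef;
	int ret;

	/*
	 * 0 means some registered device claimed the range; a nonzero return
	 * means nothing handled it and the access has to go to userspace.
	 */
	ret = kvm_io_bus_write(kvm, KVM_MMIO_BUS, 0xfee00000ULL,
			       sizeof(val), &val);
	(void)ret;
}
#endif
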
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

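/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how an emulated access that straddles a page boundary could be described
 * with the two fragments allowed by KVM_MAX_MMIO_FRAGMENTS.  The gpa/len
 * values are invented; the mmio_* fields are those of struct kvm_vcpu below
 * (only present under CONFIG_HAS_IOMEM).
 */
#if 0
static void mmio_fragment_example(struct kvm_vcpu *vcpu, void *buf)
{
	/* an 8-byte access at 0xa0000ffc crosses into the next page */
	vcpu->mmio_fragments[0] = (struct kvm_mmio_fragment) {
		.gpa = 0xa0000ffc, .data = buf,     .len = 4 };
	vcpu->mmio_fragments[1] = (struct kvm_mmio_fragment) {
		.gpa = 0xa0001000, .data = buf + 4, .len = 4 };
	vcpu->mmio_nr_fragments = 2;
	vcpu->mmio_cur_fragment = 0;
}
#endif
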
struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

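/*
 * Worked example (editorial note): on a 64-bit host (BITS_PER_LONG == 64) a
 * slot with npages == 100 rounds up to 128 bits, so kvm_dirty_bitmap_bytes()
 * returns 128 / 8 = 16 bytes.
 */
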
struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

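/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the usual way the iterator above is used, here to kick every online vcpu.
 * kvm_vcpu_kick() is declared later in this header; the function name is
 * invented for the example.
 */
#if 0
static void kick_all_vcpus_example(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}
#endif
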
#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
		     u64 last_generation);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_update_eoibitmap_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void __guest_enter(void)
{
	/*
	 * This is running in ioctl context so we can avoid
	 * the call to vtime_account() with its unnecessary idle check.
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
}

static inline void __guest_exit(void)
{
	/*
	 * This is running in ioctl context so we can avoid
	 * the call to vtime_account() with its unnecessary idle check.
	 */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}

#ifdef CONFIG_CONTEXT_TRACKING
extern void guest_enter(void);
extern void guest_exit(void);

#else /* !CONFIG_CONTEXT_TRACKING */
static inline void guest_enter(void)
{
	__guest_enter();
}

static inline void guest_exit(void)
{
	__guest_exit();
}
#endif /* !CONFIG_CONTEXT_TRACKING */

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	BUG_ON(preemptible());

	local_irq_save(flags);
	guest_enter();
	local_irq_restore(flags);

	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Let's treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit();
	local_irq_restore(flags);
}

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#else
static inline void __guest_enter(void) { return; }
static inline void __guest_exit(void) { return; }
#endif /* IS_ENABLED(CONFIG_KVM) */
#endif