// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver will be used to manage
 * the movement of guest pages between the normal memory managed by
 * hypervisor (HV) and secure memory managed by Ultravisor (UV).
 *
 * The page-in or page-out requests from UV will come to HV as hcalls and
 * HV will call back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
 * shared between UV and HV. However, such pages aren't represented by
 * device private memory and mappings to shared memory exist in both
 * UV and HV page tables.
 */

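/*
 * An illustrative sketch of the life cycle of one such device PFN,
 * informally derived from the routines below (not a normative state
 * machine):
 *
 *      H_SVM_PAGE_IN(gpa)
 *        kvmppc_uvmem_get_page()        allocate a device PFN for the GPA
 *        uv_page_in()                   copy page contents into secure memory
 *
 *      HV later touches the (now device-private) page
 *        kvmppc_uvmem_migrate_to_ram()  dev_pagemap fault handler runs
 *        uv_page_out()                  copy page contents back out
 *        kvmppc_uvmem_page_free()       device PFN returned to the pool
 */
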
/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting for same page)
 * or when HV and guest simultaneously access the same page.
 * This mutex serializes the migration of page from HV(normal) to
 * UV(secure) and vice versa. So the serialization points are around
 * migrate_vma routines and page-in/out routines.
 *
 * Per-guest mutex comes with a cost though. Mainly it serializes the
 * fault path as page-out can occur when HV faults on accessing secure
 * guest pages. Currently UV issues page-in requests for all the guest
 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also, currently the number of page-outs caused
 * by HV touching secure pages is very low. If and when UV supports
 * overcommitting, then we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
 *                           as sync-points for page-in/out
 *
 * (An illustrative nesting sketch follows this comment.)
 */

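/*
 * As an illustrative sketch of that order (following the H_SVM_PAGE_IN
 * path below; the order stated above is the rule, this is just one
 * instance of it):
 *
 *      srcu_idx = srcu_read_lock(&kvm->srcu);  // 1. memslots
 *      mmap_write_lock(kvm->mm);               // 2. mmap_lock (write)
 *      mutex_lock(&kvm->arch.uvmem_lock);      // 3. uvmem slots
 *      ... migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize() ...
 *      mutex_unlock(&kvm->arch.uvmem_lock);
 *      mmap_read_unlock(kvm->mm);      // downgraded after ksm_madvise()
 *      srcu_read_unlock(&kvm->srcu, srcu_idx);
 */
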
/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE(64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever guest shares a secure page, UV will split and
 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
 *
 * HV invalidating a page: When a regular page belonging to secure
 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles page fault of a page belonging
 * to secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page-sizes, we need to break this assumption.
 */

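/*
 * To make the 2MB vs. 64K arithmetic above concrete: one UV-internal
 * 2MB mapping covers 2MB / 64K = 32 GPAs at PAGE_SIZE granularity, so
 * HV may track up to 32 device PFNs under a single 2MB UV mapping, and
 * the first partial page-out or share makes UV split that mapping into
 * 32 64K mappings. (The counts are derived from the 64K PAGE_SIZE
 * stated above, not quoted from the UV specification.)
 */
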
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

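/*
 * A pfns[] entry in a kvmppc_uvmem_slot encodes the device PFN that
 * tracks a secure GFN in its low bits; bit 63 (KVMPPC_UVMEM_PFN below)
 * marks the GFN as currently secure. See kvmppc_uvmem_pfn_insert() and
 * kvmppc_gfn_is_uvmem_pfn().
 */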
#define KVMPPC_UVMEM_PFN        (1UL << 63)

struct kvmppc_uvmem_slot {
        struct list_head list;
        unsigned long nr_pfns;
        unsigned long base_pfn;
        unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
        struct kvm *kvm;
        unsigned long gpa;
        bool skip_page_out;
};

bool kvmppc_uvmem_available(void)
{
        /*
         * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
         * and our data structures have been initialized successfully.
         */
        return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
        struct kvmppc_uvmem_slot *p;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
        if (!p->pfns) {
                kfree(p);
                return -ENOMEM;
        }
        p->nr_pfns = slot->npages;
        p->base_pfn = slot->base_gfn;

        mutex_lock(&kvm->arch.uvmem_lock);
        list_add(&p->list, &kvm->arch.uvmem_pfns);
        mutex_unlock(&kvm->arch.uvmem_lock);

        return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
        struct kvmppc_uvmem_slot *p, *next;

        mutex_lock(&kvm->arch.uvmem_lock);
        list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
                if (p->base_pfn == slot->base_gfn) {
                        vfree(p->pfns);
                        list_del(&p->list);
                        kfree(p);
                        break;
                }
        }
        mutex_unlock(&kvm->arch.uvmem_lock);
}

static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn,
                                    struct kvm *kvm)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        unsigned long index = gfn - p->base_pfn;

                        p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN;
                        return;
                }
        }
}

static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        p->pfns[gfn - p->base_pfn] = 0;
                        return;
                }
        }
}

static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
                                    unsigned long *uvmem_pfn)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        unsigned long index = gfn - p->base_pfn;

                        if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
                                if (uvmem_pfn)
                                        *uvmem_pfn = p->pfns[index] &
                                                     ~KVMPPC_UVMEM_PFN;
                                return true;
                        } else
                                return false;
                }
        }
        return false;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int ret = H_SUCCESS;
        int srcu_idx;

        kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

        if (!kvmppc_uvmem_bitmap)
                return H_UNSUPPORTED;

        /* Only radix guests can be secure guests */
        if (!kvm_is_radix(kvm))
                return H_UNSUPPORTED;

        /* NAK the transition to secure if not enabled */
        if (!kvm->arch.svm_enabled)
                return H_AUTHORITY;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                if (kvmppc_uvmem_slot_init(kvm, memslot)) {
                        ret = H_PARAMETER;
                        goto out;
                }
                ret = uv_register_mem_slot(kvm->arch.lpid,
                                           memslot->base_gfn << PAGE_SHIFT,
                                           memslot->npages * PAGE_SIZE,
                                           0, memslot->id);
                if (ret < 0) {
                        kvmppc_uvmem_slot_free(kvm, memslot);
                        ret = H_PARAMETER;
                        goto out;
                }
        }
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
        pr_info("LPID %d went secure\n", kvm->arch.lpid);
        return H_SUCCESS;
}

/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV-side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device PTEs
 * in the QEMU page table with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
                             struct kvm *kvm, bool skip_page_out)
{
        int i;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn, uvmem_pfn;
        unsigned long gfn = free->base_gfn;

        for (i = free->npages; i; --i, ++gfn) {
                struct page *uvmem_page;

                mutex_lock(&kvm->arch.uvmem_lock);
                if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                        mutex_unlock(&kvm->arch.uvmem_lock);
                        continue;
                }

                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = skip_page_out;
                mutex_unlock(&kvm->arch.uvmem_lock);

                pfn = gfn_to_pfn(kvm, gfn);
                if (is_error_noslot_pfn(pfn))
                        continue;
                kvm_release_pfn_clean(pfn);
        }
}

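/*
 * Note on the *get* above: for a secure GFN, gfn_to_pfn() faults on the
 * device-private PTE, which lands in kvmppc_uvmem_migrate_to_ram()
 * below. When skip_page_out is set, that path replaces the device PTE
 * with a freshly allocated normal page without issuing UV_PAGE_OUT;
 * otherwise the secure contents are paged out first.
 */
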
unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
        int srcu_idx;
        struct kvm_memory_slot *memslot;

        /*
         * Expect to be called only after INIT_START and before INIT_DONE.
         * If INIT_DONE was completed, use normal VM termination sequence.
         */
        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return H_STATE;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        kvm_for_each_memslot(memslot, kvm_memslots(kvm))
                kvmppc_uvmem_drop_pages(memslot, kvm, false);

        srcu_read_unlock(&kvm->srcu, srcu_idx);

        kvm->arch.secure_guest = 0;
        uv_svm_terminate(kvm->arch.lpid);

        return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device
 * PFN will be used to keep track of the secure page on HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
        struct page *dpage = NULL;
        unsigned long bit, uvmem_pfn;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn_last, pfn_first;

        pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
        pfn_last = pfn_first +
                   (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);

        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
                                  pfn_last - pfn_first);
        if (bit >= (pfn_last - pfn_first))
                goto out;
        bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
        spin_unlock(&kvmppc_uvmem_bitmap_lock);

        pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
        if (!pvt)
                goto out_clear;

        uvmem_pfn = bit + pfn_first;
        kvmppc_uvmem_pfn_insert(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

        pvt->gpa = gpa;
        pvt->kvm = kvm;

        dpage = pfn_to_page(uvmem_pfn);
        dpage->zone_device_data = pvt;
        get_page(dpage);
        lock_page(dpage);
        return dpage;
out_clear:
        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
        spin_unlock(&kvmppc_uvmem_bitmap_lock);
        return NULL;
}

/*
 * Alloc a PFN from private device memory pool and copy page from normal
 * memory to secure memory using UV_PAGE_IN uvcall.
 */
static int
kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
                   unsigned long end, unsigned long gpa, struct kvm *kvm,
                   unsigned long page_shift, bool *downgrade)
{
        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma mig;
        struct page *spage;
        unsigned long pfn;
        struct page *dpage;
        int ret = 0;

        memset(&mig, 0, sizeof(mig));
        mig.vma = vma;
        mig.start = start;
        mig.end = end;
        mig.src = &src_pfn;
        mig.dst = &dst_pfn;

        /*
         * We come here with mmap_lock write lock held just for
         * ksm_madvise(), otherwise we only need read mmap_lock.
         * Hence downgrade to read lock once ksm_madvise() is done.
         */
        ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
                          MADV_UNMERGEABLE, &vma->vm_flags);
        mmap_write_downgrade(kvm->mm);
        *downgrade = true;
        if (ret)
                return ret;

        ret = migrate_vma_setup(&mig);
        if (ret)
                return ret;

        if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
                ret = -1;
                goto out_finalize;
        }

        dpage = kvmppc_uvmem_get_page(gpa, kvm);
        if (!dpage) {
                ret = -1;
                goto out_finalize;
        }

        pfn = *mig.src >> MIGRATE_PFN_SHIFT;
        spage = migrate_pfn_to_page(*mig.src);
        if (spage)
                uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
                           page_shift);

        *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        migrate_vma_pages(&mig);
out_finalize:
        migrate_vma_finalize(&mig);
        return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, uses dev_pagemap_ops.migrate_to_ram handler
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long
kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
{
        int ret = H_PARAMETER;
        struct page *uvmem_page;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn;
        unsigned long gfn = gpa >> page_shift;
        int srcu_idx;
        unsigned long uvmem_pfn;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = true;
        }

retry:
        mutex_unlock(&kvm->arch.uvmem_lock);
        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                goto out;

        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = true;
                kvm_release_pfn_clean(pfn);
                goto retry;
        }

        if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
                ret = H_SUCCESS;
        kvm_release_pfn_clean(pfn);
        mutex_unlock(&kvm->arch.uvmem_lock);
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * H_PAGE_IN_SHARED flag makes the page shared, which means that the same
 * memory is visible from both UV and HV.
 */
unsigned long
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
                     unsigned long flags, unsigned long page_shift)
{
        bool downgrade = false;
        unsigned long start, end;
        struct vm_area_struct *vma;
        int srcu_idx;
        unsigned long gfn = gpa >> page_shift;
        int ret;

        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (page_shift != PAGE_SHIFT)
                return H_P3;

        if (flags & ~H_PAGE_IN_SHARED)
                return H_P2;

        if (flags & H_PAGE_IN_SHARED)
                return kvmppc_share_page(kvm, gpa, page_shift);

        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        mmap_write_lock(kvm->mm);

        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
                goto out;

        mutex_lock(&kvm->arch.uvmem_lock);
        /* Fail the page-in request of an already paged-in page */
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
                goto out_unlock;

        end = start + (1UL << page_shift);
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma || vma->vm_start > start || vma->vm_end < end)
                goto out_unlock;

        if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
                                &downgrade))
                ret = H_SUCCESS;
out_unlock:
        mutex_unlock(&kvm->arch.uvmem_lock);
out:
        if (downgrade)
                mmap_read_unlock(kvm->mm);
        else
                mmap_write_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 */
static int
kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
                    unsigned long end, unsigned long page_shift,
                    struct kvm *kvm, unsigned long gpa)
{
        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma mig;
        struct page *dpage, *spage;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn;
        int ret = U_SUCCESS;

        memset(&mig, 0, sizeof(mig));
        mig.vma = vma;
        mig.start = start;
        mig.end = end;
        mig.src = &src_pfn;
        mig.dst = &dst_pfn;
        mig.src_owner = &kvmppc_uvmem_pgmap;

        mutex_lock(&kvm->arch.uvmem_lock);
        /* The requested page is already paged-out, nothing to do */
        if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
                goto out;

        ret = migrate_vma_setup(&mig);
        if (ret)
                goto out;

        spage = migrate_pfn_to_page(*mig.src);
        if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
                goto out_finalize;

        if (!is_zone_device_page(spage))
                goto out_finalize;

        dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
        if (!dpage) {
                ret = -1;
                goto out_finalize;
        }

        lock_page(dpage);
        pvt = spage->zone_device_data;
        pfn = page_to_pfn(dpage);

        /*
         * This function is used in two cases:
         * - When HV touches a secure page, for which we do UV_PAGE_OUT
         * - When a secure page is converted to shared page, we *get*
         *   the page to essentially unmap the device page. In this
         *   case we skip page-out.
         */
        if (!pvt->skip_page_out)
                ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
                                  gpa, 0, page_shift);

        if (ret == U_SUCCESS)
                *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
        else {
                unlock_page(dpage);
                __free_page(dpage);
                goto out_finalize;
        }

        migrate_vma_pages(&mig);
out_finalize:
        migrate_vma_finalize(&mig);
out:
        mutex_unlock(&kvm->arch.uvmem_lock);
        return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN, and the newly
 * provisioned page/PFN gets populated in QEMU page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

        if (kvmppc_svm_page_out(vmf->vma, vmf->address,
                                vmf->address + PAGE_SIZE, PAGE_SHIFT,
                                pvt->kvm, pvt->gpa))
                return VM_FAULT_SIGBUS;
        else
                return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when secure page becomes a normal page during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
        unsigned long pfn = page_to_pfn(page) -
                        (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
        struct kvmppc_uvmem_page_pvt *pvt;

        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
        spin_unlock(&kvmppc_uvmem_bitmap_lock);

        pvt = page->zone_device_data;
        page->zone_device_data = NULL;
        kvmppc_uvmem_pfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
        kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
        .page_free      = kvmppc_uvmem_page_free,
        .migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};

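/*
 * Informal summary of the two callbacks wired up above:
 * migrate_to_ram() runs when a CPU faults on one of our device-private
 * PTEs, i.e. when HV touches a secure page; page_free() runs when the
 * last reference to a device page is dropped after migration, returning
 * its PFN to kvmppc_uvmem_bitmap.
 */
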
/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
                      unsigned long flags, unsigned long page_shift)
{
        unsigned long gfn = gpa >> page_shift;
        unsigned long start, end;
        struct vm_area_struct *vma;
        int srcu_idx;
        int ret;

        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (page_shift != PAGE_SHIFT)
                return H_P3;

        if (flags)
                return H_P2;

        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        mmap_read_lock(kvm->mm);
        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
                goto out;

        end = start + (1UL << page_shift);
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma || vma->vm_start > start || vma->vm_end < end)
                goto out;

        if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
                ret = H_SUCCESS;
out:
        mmap_read_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
        unsigned long pfn;
        int ret = U_SUCCESS;

        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
                goto out;

        ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
                         0, PAGE_SHIFT);
out:
        kvm_release_pfn_clean(pfn);
        mutex_unlock(&kvm->arch.uvmem_lock);
        return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

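/*
 * kvmppc_get_secmem_size() below walks the "secure-memory-ranges"
 * property of the ibm,uv-firmware node as an array of 4-cell entries
 * and sums up cells 2-3 of each entry as the range size. As a purely
 * hypothetical illustration of that layout (not taken from any real
 * device tree; the authoritative format is the ibm,uv-firmware
 * binding):
 *
 *      uv-firmware {
 *              compatible = "ibm,uv-firmware";
 *              secure-memory-ranges = <0x0 0x0 0x0 0x40000000>;
 *      };
 *
 * would be parsed as one range whose size cells encode 0x40000000
 * (1GB), so the function would return 1GB.
 */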
static u64 kvmppc_get_secmem_size(void)
{
        struct device_node *np;
        int i, len;
        const __be32 *prop;
        u64 size = 0;

        np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
        if (!np)
                goto out;

        prop = of_get_property(np, "secure-memory-ranges", &len);
        if (!prop)
                goto out_put;

        for (i = 0; i < len / (sizeof(*prop) * 4); i++)
                size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
        of_node_put(np);
out:
        return size;
}

int kvmppc_uvmem_init(void)
{
        int ret = 0;
        unsigned long size;
        struct resource *res;
        void *addr;
        unsigned long pfn_last, pfn_first;

        size = kvmppc_get_secmem_size();
        if (!size) {
                /*
                 * Don't fail the initialization of kvm-hv module if
                 * the platform doesn't export ibm,uv-firmware node.
                 * Let normal guests run on such PEF-disabled platform.
                 */
                pr_info("KVMPPC-UVMEM: No support for secure guests\n");
                goto out;
        }

        res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out;
        }

        kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
        kvmppc_uvmem_pgmap.res = *res;
        kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
        /* just one global instance: */
        kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
        addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
        if (IS_ERR(addr)) {
                ret = PTR_ERR(addr);
                goto out_free_region;
        }

        pfn_first = res->start >> PAGE_SHIFT;
        pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
        kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
                                      sizeof(unsigned long), GFP_KERNEL);
        if (!kvmppc_uvmem_bitmap) {
                ret = -ENOMEM;
                goto out_unmap;
        }

        pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
        return ret;
out_unmap:
        memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
        release_mem_region(res->start, size);
out:
        return ret;
}

void kvmppc_uvmem_free(void)
{
        if (!kvmppc_uvmem_bitmap)
                return;

        memunmap_pages(&kvmppc_uvmem_pgmap);
        release_mem_region(kvmppc_uvmem_pgmap.res.start,
                           resource_size(&kvmppc_uvmem_pgmap.res));
        kfree(kvmppc_uvmem_bitmap);
}