git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: s390: vsie: stop using "struct page" for vsie page
author: David Hildenbrand <david@redhat.com>
Tue, 7 Jan 2025 15:43:44 +0000 (16:43 +0100)
committer: Claudio Imbrenda <imbrenda@linux.ibm.com>
Fri, 31 Jan 2025 11:03:48 +0000 (12:03 +0100)
Now that we no longer use page->index and the page refcount explicitly,
let's avoid messing with "struct page" completely.

Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Tested-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Message-ID: <20250107154344.1003072-5-david@redhat.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/vsie.c

index 97c7c8127543458858e95fb30ca1eebf02c3437b..4581388411b715cb11581bf9afd25512cb45531e 100644 (file)
@@ -931,12 +931,14 @@ struct sie_page2 {
        u8 reserved928[0x1000 - 0x928];                 /* 0x0928 */
 };
 
+struct vsie_page;
+
 struct kvm_s390_vsie {
        struct mutex mutex;
        struct radix_tree_root addr_to_page;
        int page_count;
        int next;
-       struct page *pages[KVM_MAX_VCPUS];
+       struct vsie_page *pages[KVM_MAX_VCPUS];
 };
 
 struct kvm_s390_gisa_iam {
index 424f80f5f6b2d0825943ea4900b9ca6e54f2bca9..a0398ff85d00ba742f081c092d627b1f974cd0e7 100644 (file)
@@ -599,7 +599,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
        struct kvm *kvm = gmap->private;
        struct vsie_page *cur;
        unsigned long prefix;
-       struct page *page;
        int i;
 
        if (!gmap_is_shadow(gmap))
@@ -609,10 +608,9 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
         * therefore we can safely reference them all the time.
         */
        for (i = 0; i < kvm->arch.vsie.page_count; i++) {
-               page = READ_ONCE(kvm->arch.vsie.pages[i]);
-               if (!page)
+               cur = READ_ONCE(kvm->arch.vsie.pages[i]);
+               if (!cur)
                        continue;
-               cur = page_to_virt(page);
                if (READ_ONCE(cur->gmap) != gmap)
                        continue;
                prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
@@ -1384,14 +1382,12 @@ static void put_vsie_page(struct vsie_page *vsie_page)
 static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 {
        struct vsie_page *vsie_page;
-       struct page *page;
        int nr_vcpus;
 
        rcu_read_lock();
-       page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
+       vsie_page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
        rcu_read_unlock();
-       if (page) {
-               vsie_page = page_to_virt(page);
+       if (vsie_page) {
                if (try_get_vsie_page(vsie_page)) {
                        if (vsie_page->scb_gpa == addr)
                                return vsie_page;
@@ -1411,20 +1407,18 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 
        mutex_lock(&kvm->arch.vsie.mutex);
        if (kvm->arch.vsie.page_count < nr_vcpus) {
-               page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
-               if (!page) {
+               vsie_page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
+               if (!vsie_page) {
                        mutex_unlock(&kvm->arch.vsie.mutex);
                        return ERR_PTR(-ENOMEM);
                }
-               vsie_page = page_to_virt(page);
                __set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
-               kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
+               kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
                kvm->arch.vsie.page_count++;
        } else {
                /* reuse an existing entry that belongs to nobody */
                while (true) {
-                       page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
-                       vsie_page = page_to_virt(page);
+                       vsie_page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
                        if (try_get_vsie_page(vsie_page))
                                break;
                        kvm->arch.vsie.next++;
@@ -1438,7 +1432,8 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
        vsie_page->scb_gpa = ULONG_MAX;
 
        /* Double use of the same address or allocation failure. */
-       if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
+       if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9,
+                             vsie_page)) {
                put_vsie_page(vsie_page);
                mutex_unlock(&kvm->arch.vsie.mutex);
                return NULL;
@@ -1519,20 +1514,18 @@ void kvm_s390_vsie_init(struct kvm *kvm)
 void kvm_s390_vsie_destroy(struct kvm *kvm)
 {
        struct vsie_page *vsie_page;
-       struct page *page;
        int i;
 
        mutex_lock(&kvm->arch.vsie.mutex);
        for (i = 0; i < kvm->arch.vsie.page_count; i++) {
-               page = kvm->arch.vsie.pages[i];
+               vsie_page = kvm->arch.vsie.pages[i];
                kvm->arch.vsie.pages[i] = NULL;
-               vsie_page = page_to_virt(page);
                release_gmap_shadow(vsie_page);
                /* free the radix tree entry */
                if (vsie_page->scb_gpa != ULONG_MAX)
                        radix_tree_delete(&kvm->arch.vsie.addr_to_page,
                                          vsie_page->scb_gpa >> 9);
-               __free_page(page);
+               free_page((unsigned long)vsie_page);
        }
        kvm->arch.vsie.page_count = 0;
        mutex_unlock(&kvm->arch.vsie.mutex);