Drivers: hv: Rename fields for SynIC message and event pages
author     Roman Kisel <romank@linux.microsoft.com>
           Wed, 8 Oct 2025 23:34:08 +0000 (16:34 -0700)
committer  Wei Liu <wei.liu@kernel.org>
           Sat, 15 Nov 2025 06:18:14 +0000 (06:18 +0000)
Confidential VMBus requires interacting with two SynICs -- one
provided by the host hypervisor, and one provided by the paravisor.
Each SynIC requires its own message and event pages.

Rename the existing host-accessible SynIC message and event page fields,
adding a "hyp_" prefix, to clearly distinguish them from the paravisor
ones. The corresponding field name in mshv_root.* is changed as well for
consistency.

No functional changes.
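
For illustration only, a minimal sketch of where this rename is headed:
with two SynICs, the per-CPU context ends up holding one set of page
pointers per SynIC. The hyp_* fields below are the ones renamed in this
patch (see hyperv_vmbus.h); the para_* fields are hypothetical
placeholders for the paravisor-provided SynIC and are not part of this
change.

/*
 * Sketch, not upstream code: per-CPU context with both the
 * host-hypervisor-facing pages (renamed here to hyp_*) and
 * hypothetical paravisor-facing counterparts (the para_* names
 * are placeholders chosen only for this illustration).
 */
struct hv_per_cpu_context_sketch {
	/* Message/event pages shared with the host hypervisor's SynIC. */
	void *hyp_synic_message_page;
	void *hyp_synic_event_page;

	/* Message/event pages a paravisor-provided SynIC would need. */
	void *para_synic_message_page;
	void *para_synic_event_page;
};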

Signed-off-by: Roman Kisel <romank@linux.microsoft.com>
Reviewed-by: Tianyu Lan <tiala@microsoft.com>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Signed-off-by: Wei Liu <wei.liu@kernel.org>
drivers/hv/channel_mgmt.c
drivers/hv/hv.c
drivers/hv/hyperv_vmbus.h
drivers/hv/mshv_root.h
drivers/hv/mshv_synic.c
drivers/hv/vmbus_drv.c

diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 65dd299e2944bd1284e1301a844e88b21182899a..1a33c6944b3c4e215ef34282fa797b1e05b171a3 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -844,14 +844,14 @@ static void vmbus_wait_for_unload(void)
                                = per_cpu_ptr(hv_context.cpu_context, cpu);
 
                        /*
-                        * In a CoCo VM the synic_message_page is not allocated
+                        * In a CoCo VM the hyp_synic_message_page is not allocated
                         * in hv_synic_alloc(). Instead it is set/cleared in
                         * hv_synic_enable_regs() and hv_synic_disable_regs()
                         * such that it is set only when the CPU is online. If
                         * not all present CPUs are online, the message page
                         * might be NULL, so skip such CPUs.
                         */
-                       page_addr = hv_cpu->synic_message_page;
+                       page_addr = hv_cpu->hyp_synic_message_page;
                        if (!page_addr)
                                continue;
 
@@ -892,7 +892,7 @@ completed:
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);
 
-               page_addr = hv_cpu->synic_message_page;
+               page_addr = hv_cpu->hyp_synic_message_page;
                if (!page_addr)
                        continue;
 
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index ec5d10839e0f057a5aa7c6cca398d004a4c08483..6fbb51e5067364331036125d75028cf7a89d5773 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -147,20 +147,20 @@ int hv_synic_alloc(void)
                 * Skip these pages allocation here.
                 */
                if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
-                       hv_cpu->synic_message_page =
+                       hv_cpu->hyp_synic_message_page =
                                (void *)get_zeroed_page(GFP_ATOMIC);
-                       if (!hv_cpu->synic_message_page) {
+                       if (!hv_cpu->hyp_synic_message_page) {
                                pr_err("Unable to allocate SYNIC message page\n");
                                goto err;
                        }
 
-                       hv_cpu->synic_event_page =
+                       hv_cpu->hyp_synic_event_page =
                                (void *)get_zeroed_page(GFP_ATOMIC);
-                       if (!hv_cpu->synic_event_page) {
+                       if (!hv_cpu->hyp_synic_event_page) {
                                pr_err("Unable to allocate SYNIC event page\n");
 
-                               free_page((unsigned long)hv_cpu->synic_message_page);
-                               hv_cpu->synic_message_page = NULL;
+                               free_page((unsigned long)hv_cpu->hyp_synic_message_page);
+                               hv_cpu->hyp_synic_message_page = NULL;
                                goto err;
                        }
                }
@@ -168,30 +168,30 @@ int hv_synic_alloc(void)
                if (!ms_hyperv.paravisor_present &&
                    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
                        ret = set_memory_decrypted((unsigned long)
-                               hv_cpu->synic_message_page, 1);
+                               hv_cpu->hyp_synic_message_page, 1);
                        if (ret) {
                                pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
-                               hv_cpu->synic_message_page = NULL;
+                               hv_cpu->hyp_synic_message_page = NULL;
 
                                /*
                                 * Free the event page here so that hv_synic_free()
                                 * won't later try to re-encrypt it.
                                 */
-                               free_page((unsigned long)hv_cpu->synic_event_page);
-                               hv_cpu->synic_event_page = NULL;
+                               free_page((unsigned long)hv_cpu->hyp_synic_event_page);
+                               hv_cpu->hyp_synic_event_page = NULL;
                                goto err;
                        }
 
                        ret = set_memory_decrypted((unsigned long)
-                               hv_cpu->synic_event_page, 1);
+                               hv_cpu->hyp_synic_event_page, 1);
                        if (ret) {
                                pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
-                               hv_cpu->synic_event_page = NULL;
+                               hv_cpu->hyp_synic_event_page = NULL;
                                goto err;
                        }
 
-                       memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
-                       memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+                       memset(hv_cpu->hyp_synic_message_page, 0, PAGE_SIZE);
+                       memset(hv_cpu->hyp_synic_event_page, 0, PAGE_SIZE);
                }
        }
 
@@ -227,28 +227,28 @@ void hv_synic_free(void)
 
                if (!ms_hyperv.paravisor_present &&
                    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
-                       if (hv_cpu->synic_message_page) {
+                       if (hv_cpu->hyp_synic_message_page) {
                                ret = set_memory_encrypted((unsigned long)
-                                       hv_cpu->synic_message_page, 1);
+                                       hv_cpu->hyp_synic_message_page, 1);
                                if (ret) {
                                        pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
-                                       hv_cpu->synic_message_page = NULL;
+                                       hv_cpu->hyp_synic_message_page = NULL;
                                }
                        }
 
-                       if (hv_cpu->synic_event_page) {
+                       if (hv_cpu->hyp_synic_event_page) {
                                ret = set_memory_encrypted((unsigned long)
-                                       hv_cpu->synic_event_page, 1);
+                                       hv_cpu->hyp_synic_event_page, 1);
                                if (ret) {
                                        pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
-                                       hv_cpu->synic_event_page = NULL;
+                                       hv_cpu->hyp_synic_event_page = NULL;
                                }
                        }
                }
 
                free_page((unsigned long)hv_cpu->post_msg_page);
-               free_page((unsigned long)hv_cpu->synic_event_page);
-               free_page((unsigned long)hv_cpu->synic_message_page);
+               free_page((unsigned long)hv_cpu->hyp_synic_event_page);
+               free_page((unsigned long)hv_cpu->hyp_synic_message_page);
        }
 
        kfree(hv_context.hv_numa_map);
@@ -278,12 +278,12 @@ void hv_synic_enable_regs(unsigned int cpu)
                /* Mask out vTOM bit. ioremap_cache() maps decrypted */
                u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
                                ~ms_hyperv.shared_gpa_boundary;
-               hv_cpu->synic_message_page =
+               hv_cpu->hyp_synic_message_page =
                        (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
-               if (!hv_cpu->synic_message_page)
+               if (!hv_cpu->hyp_synic_message_page)
                        pr_err("Fail to map synic message page.\n");
        } else {
-               simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
+               simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page)
                        >> HV_HYP_PAGE_SHIFT;
        }
 
@@ -297,12 +297,12 @@ void hv_synic_enable_regs(unsigned int cpu)
                /* Mask out vTOM bit. ioremap_cache() maps decrypted */
                u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
                                ~ms_hyperv.shared_gpa_boundary;
-               hv_cpu->synic_event_page =
+               hv_cpu->hyp_synic_event_page =
                        (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
-               if (!hv_cpu->synic_event_page)
+               if (!hv_cpu->hyp_synic_event_page)
                        pr_err("Fail to map synic event page.\n");
        } else {
-               siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
+               siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page)
                        >> HV_HYP_PAGE_SHIFT;
        }
 
@@ -362,8 +362,8 @@ void hv_synic_disable_regs(unsigned int cpu)
         */
        simp.simp_enabled = 0;
        if (ms_hyperv.paravisor_present || hv_root_partition()) {
-               iounmap(hv_cpu->synic_message_page);
-               hv_cpu->synic_message_page = NULL;
+               iounmap(hv_cpu->hyp_synic_message_page);
+               hv_cpu->hyp_synic_message_page = NULL;
        } else {
                simp.base_simp_gpa = 0;
        }
@@ -374,8 +374,8 @@ void hv_synic_disable_regs(unsigned int cpu)
        siefp.siefp_enabled = 0;
 
        if (ms_hyperv.paravisor_present || hv_root_partition()) {
-               iounmap(hv_cpu->synic_event_page);
-               hv_cpu->synic_event_page = NULL;
+               iounmap(hv_cpu->hyp_synic_event_page);
+               hv_cpu->hyp_synic_event_page = NULL;
        } else {
                siefp.base_siefp_gpa = 0;
        }
@@ -405,7 +405,7 @@ static bool hv_synic_event_pending(void)
 {
        struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
        union hv_synic_event_flags *event =
-               (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
+               (union hv_synic_event_flags *)hv_cpu->hyp_synic_event_page + VMBUS_MESSAGE_SINT;
        unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
        bool pending;
        u32 relid;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 9ac6f55202875280ec6cbbe7685c67f8bfb1a4f7..d593af45a5b2aeff4a61db6cdd3ebd33e1f844d7 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -121,8 +121,8 @@ enum {
  * Per cpu state for channel handling
  */
 struct hv_per_cpu_context {
-       void *synic_message_page;
-       void *synic_event_page;
+       void *hyp_synic_message_page;
+       void *hyp_synic_event_page;
 
        /*
         * The page is only used in hv_post_message() for a TDX VM (with the
diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
index e3931b0f126932145f68bfa8028a4618ada4a0ad..db6b42db2fdc639a70b84f732fb6868c59a827ea 100644
--- a/drivers/hv/mshv_root.h
+++ b/drivers/hv/mshv_root.h
@@ -169,7 +169,7 @@ struct mshv_girq_routing_table {
 };
 
 struct hv_synic_pages {
-       struct hv_message_page *synic_message_page;
+       struct hv_message_page *hyp_synic_message_page;
        struct hv_synic_event_flags_page *synic_event_flags_page;
        struct hv_synic_event_ring_page *synic_event_ring_page;
 };
diff --git a/drivers/hv/mshv_synic.c b/drivers/hv/mshv_synic.c
index e6b6381b7c369dca4b58d5625f141419c4c4c9ac..f8b0337cdc821411afb8b07dc9b231be50445f68 100644
--- a/drivers/hv/mshv_synic.c
+++ b/drivers/hv/mshv_synic.c
@@ -394,7 +394,7 @@ unlock_out:
 void mshv_isr(void)
 {
        struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
-       struct hv_message_page **msg_page = &spages->synic_message_page;
+       struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
        struct hv_message *msg;
        bool handled;
 
@@ -456,7 +456,7 @@ int mshv_synic_init(unsigned int cpu)
 #endif
        union hv_synic_scontrol sctrl;
        struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
-       struct hv_message_page **msg_page = &spages->synic_message_page;
+       struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
        struct hv_synic_event_flags_page **event_flags_page =
                        &spages->synic_event_flags_page;
        struct hv_synic_event_ring_page **event_ring_page =
@@ -550,7 +550,7 @@ int mshv_synic_cleanup(unsigned int cpu)
        union hv_synic_sirbp sirbp;
        union hv_synic_scontrol sctrl;
        struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
-       struct hv_message_page **msg_page = &spages->synic_message_page;
+       struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
        struct hv_synic_event_flags_page **event_flags_page =
                &spages->synic_event_flags_page;
        struct hv_synic_event_ring_page **event_ring_page =
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 3c414560fa5f0bf3f8e6eb0a04d9ae258005bed5..e12f0ba0701ff6cf8a05727f362d16f894546610 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1060,7 +1060,7 @@ static void vmbus_onmessage_work(struct work_struct *work)
 void vmbus_on_msg_dpc(unsigned long data)
 {
        struct hv_per_cpu_context *hv_cpu = (void *)data;
-       void *page_addr = hv_cpu->synic_message_page;
+       void *page_addr = hv_cpu->hyp_synic_message_page;
        struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
@@ -1244,7 +1244,7 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
         * The event page can be directly checked to get the id of
         * the channel that has the interrupt pending.
         */
-       void *page_addr = hv_cpu->synic_event_page;
+       void *page_addr = hv_cpu->hyp_synic_event_page;
        union hv_synic_event_flags *event
                = (union hv_synic_event_flags *)page_addr +
                                         VMBUS_MESSAGE_SINT;
@@ -1327,7 +1327,7 @@ static void vmbus_isr(void)
 
        vmbus_chan_sched(hv_cpu);
 
-       page_addr = hv_cpu->synic_message_page;
+       page_addr = hv_cpu->hyp_synic_message_page;
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 
        /* Check if there are actual msgs to be processed */