* Skip allocating these pages here.
*/
if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
- hv_cpu->synic_message_page =
+ hv_cpu->hyp_synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (!hv_cpu->synic_message_page) {
+ if (!hv_cpu->hyp_synic_message_page) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
- hv_cpu->synic_event_page =
+ hv_cpu->hyp_synic_event_page =
(void *)get_zeroed_page(GFP_ATOMIC);
- if (!hv_cpu->synic_event_page) {
+ if (!hv_cpu->hyp_synic_event_page) {
pr_err("Unable to allocate SYNIC event page\n");
- free_page((unsigned long)hv_cpu->synic_message_page);
- hv_cpu->synic_message_page = NULL;
+ free_page((unsigned long)hv_cpu->hyp_synic_message_page);
+ hv_cpu->hyp_synic_message_page = NULL;
goto err;
}
}
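For orientation, the rename is anchored in the per-CPU bookkeeping. A minimal sketch of the renamed fields, assuming the usual shape of struct hv_per_cpu_context in drivers/hv/hyperv_vmbus.h (abridged, other members omitted):

struct hv_per_cpu_context {
	void *hyp_synic_message_page;	/* was: synic_message_page */
	void *hyp_synic_event_page;	/* was: synic_event_page */
	void *post_msg_page;		/* unchanged; freed further below */
	/* ... remaining members elided ... */
};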
if (!ms_hyperv.paravisor_present &&
(hv_isolation_type_snp() || hv_isolation_type_tdx())) {
ret = set_memory_decrypted((unsigned long)
- hv_cpu->synic_message_page, 1);
+ hv_cpu->hyp_synic_message_page, 1);
if (ret) {
pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
- hv_cpu->synic_message_page = NULL;
+ hv_cpu->hyp_synic_message_page = NULL;
/*
* Free the event page here so that hv_synic_free()
* won't later try to re-encrypt it.
*/
- free_page((unsigned long)hv_cpu->synic_event_page);
- hv_cpu->synic_event_page = NULL;
+ free_page((unsigned long)hv_cpu->hyp_synic_event_page);
+ hv_cpu->hyp_synic_event_page = NULL;
goto err;
}
ret = set_memory_decrypted((unsigned long)
- hv_cpu->synic_event_page, 1);
+ hv_cpu->hyp_synic_event_page, 1);
if (ret) {
pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
- hv_cpu->synic_event_page = NULL;
+ hv_cpu->hyp_synic_event_page = NULL;
goto err;
}
- memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
- memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+ memset(hv_cpu->hyp_synic_message_page, 0, PAGE_SIZE);
+ memset(hv_cpu->hyp_synic_event_page, 0, PAGE_SIZE);
}
}
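This hunk follows the usual confidential-computing convention: once set_memory_decrypted() succeeds, the page contents are indeterminate, hence the memset() before first use; on failure the page is leaked on purpose, because its encryption state is unknown and it must not be returned to the page allocator. A condensed sketch of the pattern, using a hypothetical helper name that is not in the patch:

/* Hypothetical helper; the name is illustrative only. */
static int hv_share_page_with_host(void **page)
{
	int ret = set_memory_decrypted((unsigned long)*page, 1);

	if (ret) {
		*page = NULL;	/* encryption state unknown: leak, don't free */
		return ret;
	}

	memset(*page, 0, PAGE_SIZE);	/* contents undefined after conversion */
	return 0;
}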
if (!ms_hyperv.paravisor_present &&
(hv_isolation_type_snp() || hv_isolation_type_tdx())) {
- if (hv_cpu->synic_message_page) {
+ if (hv_cpu->hyp_synic_message_page) {
ret = set_memory_encrypted((unsigned long)
- hv_cpu->synic_message_page, 1);
+ hv_cpu->hyp_synic_message_page, 1);
if (ret) {
pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
- hv_cpu->synic_message_page = NULL;
+ hv_cpu->hyp_synic_message_page = NULL;
}
}
- if (hv_cpu->synic_event_page) {
+ if (hv_cpu->hyp_synic_event_page) {
ret = set_memory_encrypted((unsigned long)
- hv_cpu->synic_event_page, 1);
+ hv_cpu->hyp_synic_event_page, 1);
if (ret) {
pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
- hv_cpu->synic_event_page = NULL;
+ hv_cpu->hyp_synic_event_page = NULL;
}
}
}
free_page((unsigned long)hv_cpu->post_msg_page);
- free_page((unsigned long)hv_cpu->synic_event_page);
- free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->hyp_synic_event_page);
+ free_page((unsigned long)hv_cpu->hyp_synic_message_page);
}
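Teardown mirrors allocation: each page is re-encrypted before free_page(), and when set_memory_encrypted() fails the pointer is cleared so the later free_page() degenerates to a no-op (free_page(0) is safe) and the still-shared page is leaked. A hypothetical condensation, ignoring the isolation-type guard for brevity:

static void hv_unshare_and_free_page(void **page)
{
	if (*page && set_memory_encrypted((unsigned long)*page, 1))
		*page = NULL;	/* re-encryption failed: leak the page */

	free_page((unsigned long)*page);	/* no-op when *page is NULL */
	*page = NULL;
}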
kfree(hv_context.hv_numa_map);
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_message_page =
+ hv_cpu->hyp_synic_message_page =
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
- if (!hv_cpu->synic_message_page)
+ if (!hv_cpu->hyp_synic_message_page)
pr_err("Fail to map synic message page.\n");
} else {
- simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page)
>> HV_HYP_PAGE_SHIFT;
}
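For context, hv_synic_enable_regs() presumably wraps this hunk in a read-modify-write of the SIMP synthetic MSR. A sketch, assuming the hv_get_msr()/hv_set_msr() accessors used elsewhere in the Hyper-V code:

union hv_synic_simp simp;

simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
/*
 * The branch shown in the hunk above then picks ioremap_cache() vs.
 * virt_to_phys(), depending on who owns the message page.
 */
hv_set_msr(HV_MSR_SIMP, simp.as_uint64);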
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_event_page =
+ hv_cpu->hyp_synic_event_page =
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
- if (!hv_cpu->synic_event_page)
+ if (!hv_cpu->hyp_synic_event_page)
pr_err("Fail to map synic event page.\n");
} else {
- siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page)
>> HV_HYP_PAGE_SHIFT;
}
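The vTOM masking in both hunks deserves a worked example (illustrative numbers, not from the patch):

/*
 * Assume vTOM at bit 46, i.e. ms_hyperv.shared_gpa_boundary == 1ULL << 46.
 * For a reported siefp.base_siefp_gpa of 0x400000001:
 *
 *   0x400000001 << HV_HYP_PAGE_SHIFT   ==  0x400000001000
 *   0x400000001000 & ~(1ULL << 46)     ==  0x1000
 *
 * The boundary bit only marks the page as shared (decrypted); since
 * ioremap_cache() already returns a decrypted mapping on these
 * platforms, the bit is dropped before mapping.
 */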
*/
simp.simp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition()) {
- iounmap(hv_cpu->synic_message_page);
- hv_cpu->synic_message_page = NULL;
+ iounmap(hv_cpu->hyp_synic_message_page);
+ hv_cpu->hyp_synic_message_page = NULL;
} else {
simp.base_simp_gpa = 0;
}
siefp.siefp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition()) {
- iounmap(hv_cpu->synic_event_page);
- hv_cpu->synic_event_page = NULL;
+ iounmap(hv_cpu->hyp_synic_event_page);
+ hv_cpu->hyp_synic_event_page = NULL;
} else {
siefp.base_siefp_gpa = 0;
}
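Note the asymmetry in both disable hunks: when the paravisor or hypervisor owns the page, the guest only unmaps its own alias and leaves the GPA field alone; when the guest owns the page, it withdraws the PFN and frees the memory later in hv_synic_free(). A hypothetical condensation of the branch (the GPA fields are bitfields and cannot be passed by pointer, so the caller zeroes them):

static bool hv_synic_unmap_alias(void **page)
{
	if (!(ms_hyperv.paravisor_present || hv_root_partition()))
		return false;	/* guest-owned: caller zeroes base_*_gpa */

	/* The owner keeps the page; drop only our mapping of it. */
	iounmap((void __iomem *)*page);
	*page = NULL;
	return true;
}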
{
struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
union hv_synic_event_flags *event =
- (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
+ (union hv_synic_event_flags *)hv_cpu->hyp_synic_event_page + VMBUS_MESSAGE_SINT;
unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
bool pending;
u32 relid;
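These declarations set up the scan of the per-SINT event flags. A sketch of the typical walk, assuming the for_each_set_bit() pattern and the HV_EVENT_FLAGS_COUNT bound used elsewhere in VMBus:

for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
	if (relid == 0)
		continue;	/* relid 0 is reserved, not a channel */

	/* Consume the flag atomically before dispatching. */
	pending = sync_test_and_clear_bit(relid, recv_int_page);
	if (!pending)
		continue;

	/* Look up the channel for this relid and schedule its callback. */
}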