1 // SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */
#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
21 #ifdef CONFIG_AMD_MEM_ENCRYPT
23 #define GHCB_USAGE_HYPERV_CALL 1
28 u64 hypercalldata
[509];
37 u32 countofelements
: 12;
39 u32 repstartindex
: 12;
48 u32 elementsprocessed
: 12;
56 } __packed
__aligned(HV_HYP_PAGE_SIZE
);
58 static u16 hv_ghcb_version __ro_after_init
;
60 u64
hv_ghcb_hypercall(u64 control
, void *input
, void *output
, u32 input_size
)
62 union hv_ghcb
*hv_ghcb
;
72 local_irq_save(flags
);
73 ghcb_base
= (void **)this_cpu_ptr(hv_ghcb_pg
);
74 hv_ghcb
= (union hv_ghcb
*)*ghcb_base
;
76 local_irq_restore(flags
);
80 hv_ghcb
->ghcb
.protocol_version
= GHCB_PROTOCOL_MAX
;
81 hv_ghcb
->ghcb
.ghcb_usage
= GHCB_USAGE_HYPERV_CALL
;
83 hv_ghcb
->hypercall
.outputgpa
= (u64
)output
;
84 hv_ghcb
->hypercall
.hypercallinput
.asuint64
= 0;
85 hv_ghcb
->hypercall
.hypercallinput
.callcode
= control
;
88 memcpy(hv_ghcb
->hypercall
.hypercalldata
, input
, input_size
);
92 hv_ghcb
->ghcb
.ghcb_usage
= 0xffffffff;
93 memset(hv_ghcb
->ghcb
.save
.valid_bitmap
, 0,
94 sizeof(hv_ghcb
->ghcb
.save
.valid_bitmap
));
96 status
= hv_ghcb
->hypercall
.hypercalloutput
.callstatus
;
98 local_irq_restore(flags
);
103 static inline u64
rd_ghcb_msr(void)
105 return __rdmsr(MSR_AMD64_SEV_ES_GHCB
);
108 static inline void wr_ghcb_msr(u64 val
)
110 native_wrmsrl(MSR_AMD64_SEV_ES_GHCB
, val
);
113 static enum es_result
hv_ghcb_hv_call(struct ghcb
*ghcb
, u64 exit_code
,
114 u64 exit_info_1
, u64 exit_info_2
)
116 /* Fill in protocol and format specifiers */
117 ghcb
->protocol_version
= hv_ghcb_version
;
118 ghcb
->ghcb_usage
= GHCB_DEFAULT_USAGE
;
120 ghcb_set_sw_exit_code(ghcb
, exit_code
);
121 ghcb_set_sw_exit_info_1(ghcb
, exit_info_1
);
122 ghcb_set_sw_exit_info_2(ghcb
, exit_info_2
);
126 if (ghcb
->save
.sw_exit_info_1
& GENMASK_ULL(31, 0))
132 void __noreturn
hv_ghcb_terminate(unsigned int set
, unsigned int reason
)
134 u64 val
= GHCB_MSR_TERM_REQ
;
136 /* Tell the hypervisor what went wrong. */
137 val
|= GHCB_SEV_TERM_REASON(set
, reason
);
139 /* Request Guest Termination from Hypvervisor */
144 asm volatile("hlt\n" : : : "memory");
147 bool hv_ghcb_negotiate_protocol(void)
152 /* Save ghcb page gpa. */
153 ghcb_gpa
= rd_ghcb_msr();
155 /* Do the GHCB protocol version negotiation */
156 wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ
);
160 if (GHCB_MSR_INFO(val
) != GHCB_MSR_SEV_INFO_RESP
)
163 if (GHCB_MSR_PROTO_MAX(val
) < GHCB_PROTOCOL_MIN
||
164 GHCB_MSR_PROTO_MIN(val
) > GHCB_PROTOCOL_MAX
)
167 hv_ghcb_version
= min_t(size_t, GHCB_MSR_PROTO_MAX(val
),
170 /* Write ghcb page back after negotiating protocol. */
171 wr_ghcb_msr(ghcb_gpa
);
177 void hv_ghcb_msr_write(u64 msr
, u64 value
)
179 union hv_ghcb
*hv_ghcb
;
188 local_irq_save(flags
);
189 ghcb_base
= (void **)this_cpu_ptr(hv_ghcb_pg
);
190 hv_ghcb
= (union hv_ghcb
*)*ghcb_base
;
192 local_irq_restore(flags
);
196 ghcb_set_rcx(&hv_ghcb
->ghcb
, msr
);
197 ghcb_set_rax(&hv_ghcb
->ghcb
, lower_32_bits(value
));
198 ghcb_set_rdx(&hv_ghcb
->ghcb
, upper_32_bits(value
));
200 if (hv_ghcb_hv_call(&hv_ghcb
->ghcb
, SVM_EXIT_MSR
, 1, 0))
201 pr_warn("Fail to write msr via ghcb %llx.\n", msr
);
203 local_irq_restore(flags
);
205 EXPORT_SYMBOL_GPL(hv_ghcb_msr_write
);
207 void hv_ghcb_msr_read(u64 msr
, u64
*value
)
209 union hv_ghcb
*hv_ghcb
;
213 /* Check size of union hv_ghcb here. */
214 BUILD_BUG_ON(sizeof(union hv_ghcb
) != HV_HYP_PAGE_SIZE
);
221 local_irq_save(flags
);
222 ghcb_base
= (void **)this_cpu_ptr(hv_ghcb_pg
);
223 hv_ghcb
= (union hv_ghcb
*)*ghcb_base
;
225 local_irq_restore(flags
);
229 ghcb_set_rcx(&hv_ghcb
->ghcb
, msr
);
230 if (hv_ghcb_hv_call(&hv_ghcb
->ghcb
, SVM_EXIT_MSR
, 0, 0))
231 pr_warn("Fail to read msr via ghcb %llx.\n", msr
);
233 *value
= (u64
)lower_32_bits(hv_ghcb
->ghcb
.save
.rax
)
234 | ((u64
)lower_32_bits(hv_ghcb
->ghcb
.save
.rdx
) << 32);
235 local_irq_restore(flags
);
237 EXPORT_SYMBOL_GPL(hv_ghcb_msr_read
);
240 * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
242 * In Isolation VM, all guest memory is encrypted from host and guest
243 * needs to set memory visible to host via hvcall before sharing memory
246 static int hv_mark_gpa_visibility(u16 count
, const u64 pfn
[],
247 enum hv_mem_host_visibility visibility
)
249 struct hv_gpa_range_for_visibility
**input_pcpu
, *input
;
254 /* no-op if partition isolation is not enabled */
255 if (!hv_is_isolation_supported())
258 if (count
> HV_MAX_MODIFY_GPA_REP_COUNT
) {
259 pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count
,
260 HV_MAX_MODIFY_GPA_REP_COUNT
);
264 local_irq_save(flags
);
265 input_pcpu
= (struct hv_gpa_range_for_visibility
**)
266 this_cpu_ptr(hyperv_pcpu_input_arg
);
268 if (unlikely(!input
)) {
269 local_irq_restore(flags
);
273 input
->partition_id
= HV_PARTITION_ID_SELF
;
274 input
->host_visibility
= visibility
;
275 input
->reserved0
= 0;
276 input
->reserved1
= 0;
277 memcpy((void *)input
->gpa_page_list
, pfn
, count
* sizeof(*pfn
));
278 hv_status
= hv_do_rep_hypercall(
279 HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY
, count
,
280 0, input
, &pages_processed
);
281 local_irq_restore(flags
);
283 if (hv_result_success(hv_status
))
290 * hv_vtom_set_host_visibility - Set specified memory visible to host.
292 * In Isolation VM, all guest memory is encrypted from host and guest
293 * needs to set memory visible to host via hvcall before sharing memory
294 * with host. This function works as wrap of hv_mark_gpa_visibility()
295 * with memory base and size.
297 static bool hv_vtom_set_host_visibility(unsigned long kbuffer
, int pagecount
, bool enc
)
299 enum hv_mem_host_visibility visibility
= enc
?
300 VMBUS_PAGE_NOT_VISIBLE
: VMBUS_PAGE_VISIBLE_READ_WRITE
;
306 pfn_array
= kmalloc(HV_HYP_PAGE_SIZE
, GFP_KERNEL
);
310 for (i
= 0, pfn
= 0; i
< pagecount
; i
++) {
311 pfn_array
[pfn
] = virt_to_hvpfn((void *)kbuffer
+ i
* HV_HYP_PAGE_SIZE
);
314 if (pfn
== HV_MAX_MODIFY_GPA_REP_COUNT
|| i
== pagecount
- 1) {
315 ret
= hv_mark_gpa_visibility(pfn
, pfn_array
,
319 goto err_free_pfn_array
;
330 static bool hv_vtom_tlb_flush_required(bool private)
335 static bool hv_vtom_cache_flush_required(void)
340 static bool hv_is_private_mmio(u64 addr
)
343 * Hyper-V always provides a single IO-APIC in a guest VM.
344 * When a paravisor is used, it is emulated by the paravisor
345 * in the guest context and must be mapped private.
347 if (addr
>= HV_IOAPIC_BASE_ADDRESS
&&
348 addr
< (HV_IOAPIC_BASE_ADDRESS
+ PAGE_SIZE
))
351 /* Same with a vTPM */
352 if (addr
>= VTPM_BASE_ADDRESS
&&
353 addr
< (VTPM_BASE_ADDRESS
+ PAGE_SIZE
))
359 void __init
hv_vtom_init(void)
362 * By design, a VM using vTOM doesn't see the SEV setting,
363 * so SEV initialization is bypassed and sev_status isn't set.
364 * Set it here to indicate a vTOM VM.
366 sev_status
= MSR_AMD64_SNP_VTOM
;
367 cc_set_vendor(CC_VENDOR_AMD
);
368 cc_set_mask(ms_hyperv
.shared_gpa_boundary
);
369 physical_mask
&= ms_hyperv
.shared_gpa_boundary
- 1;
371 x86_platform
.hyper
.is_private_mmio
= hv_is_private_mmio
;
372 x86_platform
.guest
.enc_cache_flush_required
= hv_vtom_cache_flush_required
;
373 x86_platform
.guest
.enc_tlb_flush_required
= hv_vtom_tlb_flush_required
;
374 x86_platform
.guest
.enc_status_change_finish
= hv_vtom_set_host_visibility
;
377 #endif /* CONFIG_AMD_MEM_ENCRYPT */
379 enum hv_isolation_type
hv_get_isolation_type(void)
381 if (!(ms_hyperv
.priv_high
& HV_ISOLATION
))
382 return HV_ISOLATION_TYPE_NONE
;
383 return FIELD_GET(HV_ISOLATION_TYPE
, ms_hyperv
.isolation_config_b
);
385 EXPORT_SYMBOL_GPL(hv_get_isolation_type
);
388 * hv_is_isolation_supported - Check system runs in the Hyper-V
391 bool hv_is_isolation_supported(void)
393 if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR
))
396 if (!hypervisor_is_type(X86_HYPER_MS_HYPERV
))
399 return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE
;
402 DEFINE_STATIC_KEY_FALSE(isolation_type_snp
);
405 * hv_isolation_type_snp - Check system runs in the AMD SEV-SNP based
408 bool hv_isolation_type_snp(void)
410 return static_branch_unlikely(&isolation_type_snp
);