From: Ingo Molnar
Date: Tue, 13 May 2025 08:42:06 +0000 (+0200)
Subject: Merge branch 'x86/msr' into x86/core, to resolve conflicts
X-Git-Tag: v6.16-rc1~195^2~25
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1f82e8e1ca18aa0b020538a3f227f5d56382638e;p=thirdparty%2Fkernel%2Flinux.git

Merge branch 'x86/msr' into x86/core, to resolve conflicts

Conflicts:
	arch/x86/boot/startup/sme.c
	arch/x86/coco/sev/core.c
	arch/x86/kernel/fpu/core.c
	arch/x86/kernel/fpu/xstate.c

Semantic conflict:
	arch/x86/include/asm/sev-internal.h

Signed-off-by: Ingo Molnar
---

1f82e8e1ca18aa0b020538a3f227f5d56382638e
diff --cc arch/x86/boot/startup/sme.c
index 753cd2094080b,25f6677e85751..70ea1748c0a78
--- a/arch/x86/boot/startup/sme.c
+++ b/arch/x86/boot/startup/sme.c
@@@ -523,7 -526,7 +523,7 @@@ void __head sme_enable(struct boot_para
  	me_mask = 1UL << (ebx & 0x3f);
  
  	/* Check the SEV MSR whether SEV or SME is enabled */
- 	sev_status = msr = __rdmsr(MSR_AMD64_SEV);
 -	RIP_REL_REF(sev_status) = msr = native_rdmsrq(MSR_AMD64_SEV);
++	sev_status = msr = native_rdmsrq(MSR_AMD64_SEV);
  	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
  
  	/*
diff --cc arch/x86/coco/sev/core.c
index ac400525de73e,ff82151f77188..b40c159b64e43
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@@ -44,7 -43,10 +44,8 @@@
  #include
  #include
  #include
+ #include
  
 -#define DR7_RESET_VALUE	0x400
 -
  /* AP INIT values as documented in the APM2 section "Processor Initialization State" */
  #define AP_INIT_CS_LIMIT	0xffff
  #define AP_INIT_DS_LIMIT	0xffff
diff --cc arch/x86/include/asm/sev-internal.h
index b7232081f8f77,0000000000000..3dfd306d1c9e8
mode 100644,000000..100644
--- a/arch/x86/include/asm/sev-internal.h
+++ b/arch/x86/include/asm/sev-internal.h
@@@ -1,105 -1,0 +1,105 @@@
 +/* SPDX-License-Identifier: GPL-2.0 */
 +
 +#define DR7_RESET_VALUE	0x400
 +
 +extern struct ghcb boot_ghcb_page;
 +extern u64 sev_hv_features;
 +extern u64 sev_secrets_pa;
 +
 +/* #VC handler runtime per-CPU data */
 +struct sev_es_runtime_data {
 +	struct ghcb ghcb_page;
 +
 +	/*
 +	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
 +	 * It is needed when an NMI happens while the #VC handler uses the real
 +	 * GHCB, and the NMI handler itself is causing another #VC exception. In
 +	 * that case the GHCB content of the first handler needs to be backed up
 +	 * and restored.
 +	 */
 +	struct ghcb backup_ghcb;
 +
 +	/*
 +	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
 +	 * There is no need for it to be atomic, because nothing is written to
 +	 * the GHCB between the read and the write of ghcb_active. So it is safe
 +	 * to use it when a nested #VC exception happens before the write.
 +	 *
 +	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
 +	 * happens while the first #VC handler uses the GHCB. When the NMI code
 +	 * raises a second #VC handler it might overwrite the contents of the
 +	 * GHCB written by the first handler. To avoid this the content of the
 +	 * GHCB is saved and restored when the GHCB is detected to be in use
 +	 * already.
 +	 */
 +	bool ghcb_active;
 +	bool backup_ghcb_active;
 +
 +	/*
 +	 * Cached DR7 value - write it on DR7 writes and return it on reads.
 +	 * That value will never make it to the real hardware DR7 as debugging
 +	 * is currently unsupported in SEV-ES guests.
 +	 */
 +	unsigned long dr7;
 +};
 +
 +struct ghcb_state {
 +	struct ghcb *ghcb;
 +};
 +
 +extern struct svsm_ca boot_svsm_ca_page;
 +
 +struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
 +void __sev_put_ghcb(struct ghcb_state *state);
 +
 +DECLARE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 +DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
 +
 +void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 +			   unsigned long npages, enum psc_op op);
 +
 +DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
 +DECLARE_PER_CPU(u64, svsm_caa_pa);
 +
 +extern struct svsm_ca *boot_svsm_caa;
 +extern u64 boot_svsm_caa_pa;
 +
 +static __always_inline struct svsm_ca *svsm_get_caa(void)
 +{
 +	if (sev_cfg.use_cas)
 +		return this_cpu_read(svsm_caa);
 +	else
 +		return boot_svsm_caa;
 +}
 +
 +static __always_inline u64 svsm_get_caa_pa(void)
 +{
 +	if (sev_cfg.use_cas)
 +		return this_cpu_read(svsm_caa_pa);
 +	else
 +		return boot_svsm_caa_pa;
 +}
 +
 +int svsm_perform_call_protocol(struct svsm_call *call);
 +
 +static inline u64 sev_es_rd_ghcb_msr(void)
 +{
- 	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
++	return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
 +}
 +
 +static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 +{
 +	u32 low, high;
 +
 +	low  = (u32)(val);
 +	high = (u32)(val >> 32);
 +
 +	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
 +}
 +
 +void snp_register_ghcb_early(unsigned long paddr);
 +bool sev_es_negotiate_protocol(void);
 +bool sev_es_check_cpu_features(void);
 +u64 get_hv_features(void);
 +
 +const struct snp_cpuid_table *snp_cpuid_get_table(void);
diff --cc arch/x86/kernel/fpu/core.c
index 1cda5b78540be,e92d27324d9ab..948b4f5fad99c
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@@ -334,8 -328,8 +335,8 @@@ void fpu_sync_guest_vmexit_xfd_state(vo
  	lockdep_assert_irqs_disabled();
  
  	if (fpu_state_size_dynamic()) {
- 		rdmsrl(MSR_IA32_XFD, fpstate->xfd);
 -		rdmsrq(MSR_IA32_XFD, fps->xfd);
 -		__this_cpu_write(xfd_state, fps->xfd);
++		rdmsrq(MSR_IA32_XFD, fpstate->xfd);
 +		__this_cpu_write(xfd_state, fpstate->xfd);
  	}
  }
  EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
diff --cc arch/x86/kernel/fpu/xstate.c
index 1c8410b68108a,86d690afb63c7..3e477a553401e
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@@ -959,7 -910,7 +960,7 @@@ void fpu__resume_cpu(void
  	}
  
  	if (fpu_state_size_dynamic())
- 		wrmsrl(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd);
 -		wrmsrq(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd);
++		wrmsrq(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd);
  }
  
  /*
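
The recurring pattern in the hunks above is the x86/msr rename of the 64-bit MSR accessors: rdmsrl()/wrmsrl() become rdmsrq()/wrmsrq(), and the low-level __rdmsr() becomes native_rdmsrq(), while the x86/core side moves the surrounding code. As a rough illustration only, not part of this merge, with the hypothetical helpers read_xfd_sketch()/write_xfd_sketch() standing in for real callers such as the fpu code touched above:

/*
 * Illustrative sketch, not part of this merge: it only shows the accessor
 * spelling used on the resolved side of the conflicts. The helper names
 * are made up for the example.
 */
#include <asm/msr.h>		/* rdmsrq(), wrmsrq(), native_rdmsrq() */
#include <asm/msr-index.h>	/* MSR_IA32_XFD */

static inline u64 read_xfd_sketch(void)
{
	u64 val;

	/* Old spelling: rdmsrl(MSR_IA32_XFD, val); */
	rdmsrq(MSR_IA32_XFD, val);

	return val;
}

static inline void write_xfd_sketch(u64 val)
{
	/* Old spelling: wrmsrl(MSR_IA32_XFD, val); */
	wrmsrq(MSR_IA32_XFD, val);
}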