Merge branch 'x86/msr' into x86/core, to resolve conflicts
author     Ingo Molnar <mingo@kernel.org>
           Tue, 13 May 2025 08:42:06 +0000 (10:42 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 13 May 2025 08:42:06 +0000 (10:42 +0200)
 Conflicts:
arch/x86/boot/startup/sme.c
arch/x86/coco/sev/core.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xstate.c

 Semantic conflict:
arch/x86/include/asm/sev-internal.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
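
For context: the conflict resolutions below all pick up the MSR accessor renames coming in via x86/msr (__rdmsr() becomes native_rdmsrq(), and the rdmsrl()/wrmsrl() helpers become rdmsrq()/wrmsrq()). A minimal before/after sketch, editor's illustration only and not part of the commit; the function name msr_rename_example() is hypothetical:

    #include <asm/msr.h>

    static void msr_rename_example(void)
    {
            u64 val;

            val = native_rdmsrq(MSR_AMD64_SEV);     /* was: val = __rdmsr(MSR_AMD64_SEV); */
            rdmsrq(MSR_IA32_XFD, val);              /* was: rdmsrl(MSR_IA32_XFD, val);    */
            wrmsrq(MSR_IA32_XFD, val);              /* was: wrmsrl(MSR_IA32_XFD, val);    */
    }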
33 files changed:
arch/x86/boot/startup/sme.c
arch/x86/coco/sev/core.c
arch/x86/events/amd/ibs.c
arch/x86/events/core.c
arch/x86/include/asm/asm.h
arch/x86/include/asm/debugreg.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/sev-internal.h
arch/x86/kernel/acpi/cppc.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/topology_amd.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/fpu/xstate.h
arch/x86/kernel/process.c
arch/x86/kernel/process_64.c
arch/x86/kernel/traps.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/insn-eval.c
arch/x86/mm/pat/memtype.c
arch/x86/mm/tlb.c
arch/x86/pci/amd_bus.c
drivers/edac/amd64_edac.c

diff --cc arch/x86/boot/startup/sme.c
index 753cd2094080bdc3539fdba7a9706218ebf4bebf,25f6677e85751fca94031b240c73669c390d014e..70ea1748c0a786f387d3c89303dca8e4fd4eea7f
@@@ -523,7 -526,7 +523,7 @@@ void __head sme_enable(struct boot_para
        me_mask = 1UL << (ebx & 0x3f);
  
        /* Check the SEV MSR whether SEV or SME is enabled */
-       sev_status = msr = __rdmsr(MSR_AMD64_SEV);
 -      RIP_REL_REF(sev_status) = msr = native_rdmsrq(MSR_AMD64_SEV);
++      sev_status = msr = native_rdmsrq(MSR_AMD64_SEV);
        feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
  
        /*
diff --cc arch/x86/coco/sev/core.c
index ac400525de73e98c2cfb5adb8151ef72d34a92f0,ff82151f77188b792c884c2f7c60b27321bc03b9..b40c159b64e430087f1a8b9ce3b3bca56b9640dd
  #include <asm/apic.h>
  #include <asm/cpuid.h>
  #include <asm/cmdline.h>
+ #include <asm/msr.h>
  
 -#define DR7_RESET_VALUE        0x400
 -
  /* AP INIT values as documented in the APM2  section "Processor Initialization State" */
  #define AP_INIT_CS_LIMIT              0xffff
  #define AP_INIT_DS_LIMIT              0xffff
arch/x86/events/amd/ibs.c: Simple merge
arch/x86/events/core.c: Simple merge
arch/x86/include/asm/asm.h: Simple merge
arch/x86/include/asm/debugreg.h: Simple merge
arch/x86/include/asm/microcode.h: Simple merge
diff --cc arch/x86/include/asm/sev-internal.h
index b7232081f8f77fcdbb4e8fbdecddc21fe7ceabbb,0000000000000000000000000000000000000000..3dfd306d1c9e8833128fbb74cd4f18d6ae4d7343
mode 100644,000000..100644
--- /dev/null
@@@ -1,105 -1,0 +1,105 @@@
 +/* SPDX-License-Identifier: GPL-2.0 */
 +
 +#define DR7_RESET_VALUE        0x400
 +
 +extern struct ghcb boot_ghcb_page;
 +extern u64 sev_hv_features;
 +extern u64 sev_secrets_pa;
 +
 +/* #VC handler runtime per-CPU data */
 +struct sev_es_runtime_data {
 +      struct ghcb ghcb_page;
 +
 +      /*
 +       * Reserve one page per CPU as backup storage for the unencrypted GHCB.
 +       * It is needed when an NMI happens while the #VC handler uses the real
 +       * GHCB, and the NMI handler itself is causing another #VC exception. In
 +       * that case the GHCB content of the first handler needs to be backed up
 +       * and restored.
 +       */
 +      struct ghcb backup_ghcb;
 +
 +      /*
 +       * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
 +       * There is no need for it to be atomic, because nothing is written to
 +       * the GHCB between the read and the write of ghcb_active. So it is safe
 +       * to use it when a nested #VC exception happens before the write.
 +       *
 +       * This is necessary for example in the #VC->NMI->#VC case when the NMI
 +       * happens while the first #VC handler uses the GHCB. When the NMI code
 +       * raises a second #VC handler it might overwrite the contents of the
 +       * GHCB written by the first handler. To avoid this the content of the
 +       * GHCB is saved and restored when the GHCB is detected to be in use
 +       * already.
 +       */
 +      bool ghcb_active;
 +      bool backup_ghcb_active;
 +
 +      /*
 +       * Cached DR7 value - write it on DR7 writes and return it on reads.
 +       * That value will never make it to the real hardware DR7 as debugging
 +       * is currently unsupported in SEV-ES guests.
 +       */
 +      unsigned long dr7;
 +};
 +
 +struct ghcb_state {
 +      struct ghcb *ghcb;
 +};
 +
 +extern struct svsm_ca boot_svsm_ca_page;
 +
 +struct ghcb *__sev_get_ghcb(struct ghcb_state *state);
 +void __sev_put_ghcb(struct ghcb_state *state);
 +
 +DECLARE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 +DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
 +
 +void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 +                         unsigned long npages, enum psc_op op);
 +
 +DECLARE_PER_CPU(struct svsm_ca *, svsm_caa);
 +DECLARE_PER_CPU(u64, svsm_caa_pa);
 +
 +extern struct svsm_ca *boot_svsm_caa;
 +extern u64 boot_svsm_caa_pa;
 +
 +static __always_inline struct svsm_ca *svsm_get_caa(void)
 +{
 +      if (sev_cfg.use_cas)
 +              return this_cpu_read(svsm_caa);
 +      else
 +              return boot_svsm_caa;
 +}
 +
 +static __always_inline u64 svsm_get_caa_pa(void)
 +{
 +      if (sev_cfg.use_cas)
 +              return this_cpu_read(svsm_caa_pa);
 +      else
 +              return boot_svsm_caa_pa;
 +}
 +
 +int svsm_perform_call_protocol(struct svsm_call *call);
 +
 +static inline u64 sev_es_rd_ghcb_msr(void)
 +{
-       return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
++      return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
 +}
 +
 +static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 +{
 +      u32 low, high;
 +
 +      low  = (u32)(val);
 +      high = (u32)(val >> 32);
 +
 +      native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
 +}
 +
 +void snp_register_ghcb_early(unsigned long paddr);
 +bool sev_es_negotiate_protocol(void);
 +bool sev_es_check_cpu_features(void);
 +u64 get_hv_features(void);
 +
 +const struct snp_cpuid_table *snp_cpuid_get_table(void);
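
(Editor's illustration, not part of the commit.) A minimal usage sketch of the GHCB acquire/release helpers declared in this new header, following the pattern the comments above describe; the function name example_vc_path() is hypothetical:

    static void example_vc_path(void)
    {
            struct ghcb_state state;
            struct ghcb *ghcb;

            /*
             * Marks the per-CPU GHCB as in use; per the comments above, if it
             * is already active (a nested #VC), the current contents are first
             * saved to the per-CPU backup page.
             */
            ghcb = __sev_get_ghcb(&state);

            /* ... fill in the GHCB and exit to the hypervisor ... */

            /*
             * Releases the GHCB again and restores the backed-up contents if
             * the backup was in use.
             */
            __sev_put_ghcb(&state);
    }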
arch/x86/kernel/acpi/cppc.c: Simple merge
arch/x86/kernel/amd_nb.c: Simple merge
arch/x86/kernel/cpu/bugs.c: Simple merge
arch/x86/kernel/cpu/common.c: Simple merge
arch/x86/kernel/cpu/intel.c: Simple merge
arch/x86/kernel/cpu/mce/inject.c: Simple merge
arch/x86/kernel/cpu/microcode/amd.c: Simple merge
arch/x86/kernel/cpu/microcode/core.c: Simple merge
arch/x86/kernel/cpu/microcode/intel.c: Simple merge
arch/x86/kernel/cpu/topology_amd.c: Simple merge
diff --cc arch/x86/kernel/fpu/core.c
index 1cda5b78540be841ccd52a318341b76f643dce5e,e92d27324d9ab5ac16913e92680de25935d3ba22..948b4f5fad99c940b4f95d75a54e39986e57f203
@@@ -334,8 -328,8 +335,8 @@@ void fpu_sync_guest_vmexit_xfd_state(vo
  
        lockdep_assert_irqs_disabled();
        if (fpu_state_size_dynamic()) {
-               rdmsrl(MSR_IA32_XFD, fpstate->xfd);
 -              rdmsrq(MSR_IA32_XFD, fps->xfd);
 -              __this_cpu_write(xfd_state, fps->xfd);
++              rdmsrq(MSR_IA32_XFD, fpstate->xfd);
 +              __this_cpu_write(xfd_state, fpstate->xfd);
        }
  }
  EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
diff --cc arch/x86/kernel/fpu/xstate.c
index 1c8410b68108a873e969fe6aa9bd94ae83ef6c83,86d690afb63c78969b8051b321efa776a5037091..3e477a553401e237dcd16812da53d102650411b3
@@@ -959,7 -910,7 +960,7 @@@ void fpu__resume_cpu(void
        }
  
        if (fpu_state_size_dynamic())
-               wrmsrl(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd);
 -              wrmsrq(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd);
++              wrmsrq(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd);
  }
  
  /*
arch/x86/kernel/fpu/xstate.h: Simple merge
arch/x86/kernel/process.c: Simple merge
arch/x86/kernel/process_64.c: Simple merge
arch/x86/kernel/traps.c: Simple merge
arch/x86/kvm/svm/sev.c: Simple merge
arch/x86/kvm/svm/svm.c: Simple merge
arch/x86/kvm/vmx/vmx.c: Simple merge
arch/x86/kvm/x86.c: Simple merge
arch/x86/lib/insn-eval.c: Simple merge
arch/x86/mm/pat/memtype.c: Simple merge
arch/x86/mm/tlb.c: Simple merge
arch/x86/pci/amd_bus.c: Simple merge
drivers/edac/amd64_edac.c: Simple merge