The boot_{rdmsr,wrmsr}() helpers provide *just* the barebones MSR access
functionality, without any of the tracing or exception handling glue that the
kernel proper adds.
Move these helpers to asm/shared/msr.h and rename to raw_{rdmsr,wrmsr}() to
indicate what they are.
[ bp: Correct the reason why those helpers exist. I should've caught that in
the original patch that added them:
  176db622573f ("x86/boot: Introduce helpers for MSR reads/writes")
but oh well...
  - fix up include path delimiters to <> ]
Signed-off-by: John Allen <john.allen@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://patch.msgid.link/all/20250924200852.4452-2-john.allen@amd.com
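
For illustration only, not part of the patch: a minimal sketch of how a
boot-stage caller uses the renamed helpers, mirroring the sev_get_status()
pattern in the hunks below. The function name example_read_sev_status() is
hypothetical; raw_rdmsr(), struct msr and MSR_AMD64_SEV are as used in the
diff.

	#include <asm/shared/msr.h>	/* raw_rdmsr(), raw_wrmsr(), struct msr */
	#include <asm/msr-index.h>	/* MSR_AMD64_SEV */

	static u64 example_read_sev_status(void)
	{
		struct msr m;

		/* Plain RDMSR - no tracepoints, no exception fixup in boot code. */
		raw_rdmsr(MSR_AMD64_SEV, &m);

		return m.q;	/* full 64-bit value; m.l/m.h are the 32-bit halves */
	}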
#include <asm/bootparam.h>
#include <asm/pgtable_types.h>
+#include <asm/shared/msr.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
}
/* Set the SME mask if this is an SEV guest. */
- boot_rdmsr(MSR_AMD64_SEV, &m);
+ raw_rdmsr(MSR_AMD64_SEV, &m);
sev_status = m.q;
if (!(sev_status & MSR_AMD64_SEV_ENABLED))
return;
if (sev_check_cpu_support() < 0)
return 0;
- boot_rdmsr(MSR_AMD64_SEV, &m);
+ raw_rdmsr(MSR_AMD64_SEV, &m);
return m.q;
}
struct msr m;
/* Obtain the address of the calling area to use */
- boot_rdmsr(MSR_SVSM_CAA, &m);
+ raw_rdmsr(MSR_SVSM_CAA, &m);
boot_svsm_caa_pa = m.q;
/*
#ifdef CONFIG_AMD_MEM_ENCRYPT
-#include "../msr.h"
+#include <asm/shared/msr.h>
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 sev_get_status(void);
{
struct msr m;
- boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+ raw_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);
return m.q;
}
struct msr m;
m.q = val;
- boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
+ raw_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
}
#else
#include <asm/intel-family.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
+#include <asm/shared/msr.h>
#include "string.h"
-#include "msr.h"
static u32 err_flags[NCAPINTS];
struct msr m;
- boot_rdmsr(MSR_K7_HWCR, &m);
+ raw_rdmsr(MSR_K7_HWCR, &m);
m.l &= ~(1 << 15);
- boot_wrmsr(MSR_K7_HWCR, &m);
+ raw_wrmsr(MSR_K7_HWCR, &m);
get_cpuflags(); /* Make sure it really did something */
err = check_cpuflags();
struct msr m;
- boot_rdmsr(MSR_VIA_FCR, &m);
+ raw_rdmsr(MSR_VIA_FCR, &m);
m.l |= (1 << 1) | (1 << 7);
- boot_wrmsr(MSR_VIA_FCR, &m);
+ raw_wrmsr(MSR_VIA_FCR, &m);
set_bit(X86_FEATURE_CX8, cpu.flags);
err = check_cpuflags();
struct msr m, m_tmp;
u32 level = 1;
- boot_rdmsr(0x80860004, &m);
+ raw_rdmsr(0x80860004, &m);
m_tmp = m;
m_tmp.l = ~0;
- boot_wrmsr(0x80860004, &m_tmp);
+ raw_wrmsr(0x80860004, &m_tmp);
asm("cpuid"
: "+a" (level), "=d" (cpu.flags[0])
: : "ecx", "ebx");
- boot_wrmsr(0x80860004, &m);
+ raw_wrmsr(0x80860004, &m);
err = check_cpuflags();
} else if (err == 0x01 &&
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Helpers/definitions related to MSR access.
- */
-
-#ifndef BOOT_MSR_H
-#define BOOT_MSR_H
-
-#include <asm/shared/msr.h>
-
-/*
- * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
- * boot kernel since they rely on tracepoint/exception handling infrastructure
- * that's not available here.
- */
-static inline void boot_rdmsr(unsigned int reg, struct msr *m)
-{
- asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
-}
-
-static inline void boot_wrmsr(unsigned int reg, const struct msr *m)
-{
- asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
-}
-
-#endif /* BOOT_MSR_H */
};
};
+/*
+ * The kernel proper already defines rdmsr()/wrmsr(), but they are not for the
+ * boot kernel since they rely on tracepoint/exception handling infrastructure
+ * that's not available here.
+ */
+static inline void raw_rdmsr(unsigned int reg, struct msr *m)
+{
+ asm volatile("rdmsr" : "=a" (m->l), "=d" (m->h) : "c" (reg));
+}
+
+static inline void raw_wrmsr(unsigned int reg, const struct msr *m)
+{
+ asm volatile("wrmsr" : : "c" (reg), "a"(m->l), "d" (m->h) : "memory");
+}
+
#endif /* _ASM_X86_SHARED_MSR_H */