From: Xin Li (Intel)
Date: Sun, 27 Apr 2025 09:20:19 +0000 (-0700)
Subject: x86/msr: Convert __wrmsr() uses to native_wrmsr{,q}() uses
X-Git-Tag: v6.16-rc1~195^2~25^2~10
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=519be7da37b955164c59aacaee6d6ac89f4bbe15;p=thirdparty%2Fkernel%2Flinux.git

x86/msr: Convert __wrmsr() uses to native_wrmsr{,q}() uses

__wrmsr() is the lowest level MSR write API, with native_wrmsr()
and native_wrmsrq() serving as higher-level wrappers around it:

  #define native_wrmsr(msr, low, high)                    \
          __wrmsr(msr, low, high)

  #define native_wrmsrq(msr, val)                         \
          __wrmsr((msr), (u32)((u64)(val)),               \
                         (u32)((u64)(val) >> 32))

However, __wrmsr() continues to be used directly in various places.

MSR APIs are designed for different scenarios, such as native or
pvops, with or without trace, and safe or non-safe.  Unfortunately,
the current MSR API names do not adequately reflect these factors,
making it difficult to select the most appropriate API for a given
situation.

To pave the way for improving MSR API names, convert __wrmsr()
uses to native_wrmsr{,q}() to ensure consistent usage.  Later,
these APIs can be renamed to better reflect their implications,
such as native or pvops, with or without trace, and safe or
non-safe.

No functional change intended.

Signed-off-by: Xin Li (Intel)
Signed-off-by: Ingo Molnar
Acked-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Brian Gerst
Cc: David Woodhouse
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Juergen Gross
Cc: Kees Cook
Cc: Linus Torvalds
Cc: Paolo Bonzini
Cc: Sean Christopherson
Cc: Stefano Stabellini
Cc: Uros Bizjak
Cc: Vitaly Kuznetsov
Link: https://lore.kernel.org/r/20250427092027.1598740-8-xin@zytor.com
---

diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index ec4e8a4cace48..3f5ecfd80d1ec 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -44,7 +44,7 @@ static inline unsigned int brs_to(int idx)
 static __always_inline void set_debug_extn_cfg(u64 val)
 {
 	/* bits[4:3] must always be set to 11b */
-	__wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
+	native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
 }
 
 static __always_inline u64 get_debug_extn_cfg(void)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 1c136f54651cc..0174dd5483275 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -214,7 +214,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline void native_apic_msr_eoi(void)
 {
-	__wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+	native_wrmsrq(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK);
 }
 
 static inline u32 native_apic_msr_read(u32 reg)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index fbeb313ccad2b..e5f95a17124fd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -127,10 +127,12 @@ static inline u64 native_read_msr_safe(u32 msr, int *err)
 static inline void notrace
 native_write_msr(u32 msr, u32 low, u32 high)
 {
-	__wrmsr(msr, low, high);
+	u64 val = (u64)high << 32 | low;
+
+	native_wrmsrq(msr, val);
 
 	if (tracepoint_enabled(write_msr))
-		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
+		do_trace_write_msr(msr, val, 0);
 }
 
 /* Can be uninlined because referenced by paravirt */
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 7b9908cb5d69d..96db2fd03e665 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1306,7 +1306,7 @@ static noinstr bool mce_check_crashing_cpu(void)
 		}
 
 		if (mcgstatus & MCG_STATUS_RIPV) {
-			__wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
+			native_wrmsrq(MSR_IA32_MCG_STATUS, 0);
 			return true;
 		}
 	}
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 61d762555a796..6e5edd76086e5 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -483,7 +483,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
 	 * cache.
 	 */
 	saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
-	__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+	native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
 	closid_p = this_cpu_read(pqr_state.cur_closid);
 	rmid_p = this_cpu_read(pqr_state.cur_rmid);
 	mem_r = plr->kmem;
@@ -495,7 +495,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
 	 * pseudo-locked followed by reading of kernel memory to load it
 	 * into the cache.
 	 */
-	__wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+	native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
 
 	/*
 	 * Cache was flushed earlier. Now access kernel memory to read it
@@ -532,7 +532,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
 	 * Critical section end: restore closid with capacity bitmask that
 	 * does not overlap with pseudo-locked region.
 	 */
-	__wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
+	native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
 
 	/* Re-enable the hardware prefetcher(s) */
 	wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);
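
To make the equivalence behind these conversions concrete, below is a
minimal userspace sketch of the two calling conventions: a split
low/high pair versus a single 64-bit value.  The fake_wrmsr() and
fake_wrmsrq() helpers are hypothetical stand-ins for the kernel's
__wrmsr() and native_wrmsrq() (they only record the 64-bit value a
real WRMSR would receive in EDX:EAX), and the MSR index 0x1234 is an
arbitrary placeholder.

  /*
   * Userspace sketch only, not kernel code: fake_wrmsr() and
   * fake_wrmsrq() are hypothetical stand-ins for __wrmsr() and
   * native_wrmsrq().
   */
  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  static uint64_t last_val;

  /* Two-argument form: value passed as separate low/high halves. */
  static void fake_wrmsr(uint32_t msr, uint32_t low, uint32_t high)
  {
          (void)msr;
          last_val = ((uint64_t)high << 32) | low;
  }

  /* Single-argument form: value passed as one 64-bit quantity. */
  static void fake_wrmsrq(uint32_t msr, uint64_t val)
  {
          (void)msr;
          last_val = val;
  }

  int main(void)
  {
          uint64_t val = 0x123456789abcdef0ULL;
          uint64_t old_style, new_style;

          /* Old style: the caller splits the value by hand. */
          fake_wrmsr(0x1234, (uint32_t)val, (uint32_t)(val >> 32));
          old_style = last_val;

          /* New style: the caller passes the 64-bit value directly. */
          fake_wrmsrq(0x1234, val);
          new_style = last_val;

          assert(old_style == new_style);
          printf("both forms write 0x%llx\n", (unsigned long long)new_style);
          return 0;
  }

This is also why, for example, the brs.c hunk can drop the explicit
"val >> 32" argument: with native_wrmsrq() the upper half is carried
in the 64-bit value itself rather than passed separately.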