git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
x86/msr: Convert __wrmsr() uses to native_wrmsr{,q}() uses
authorXin Li (Intel) <xin@zytor.com>
Sun, 27 Apr 2025 09:20:19 +0000 (02:20 -0700)
committerIngo Molnar <mingo@kernel.org>
Fri, 2 May 2025 08:27:49 +0000 (10:27 +0200)
__wrmsr() is the lowest level MSR write API, with native_wrmsr()
and native_wrmsrq() serving as higher-level wrappers around it:

  #define native_wrmsr(msr, low, high)                    \
          __wrmsr(msr, low, high)

  #define native_wrmsrq(msr, val)                         \
          __wrmsr((msr), (u32)((u64)(val)),               \
                         (u32)((u64)(val) >> 32))

However, __wrmsr() continues to be utilized in various locations.

MSR APIs are designed for different scenarios, such as native or
pvops, with or without trace, and safe or non-safe.  Unfortunately,
the current MSR API names do not adequately reflect these factors,
making it challenging to select the most appropriate API for
various situations.

To pave the way for improving MSR API names, convert __wrmsr()
uses to native_wrmsr{,q}() to ensure consistent usage.  Later,
these APIs can be renamed to better reflect their implications,
such as native or pvops, with or without trace, and safe or
non-safe.

No functional change intended.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Uros Bizjak <ubizjak@gmail.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-8-xin@zytor.com
arch/x86/events/amd/brs.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/msr.h
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/pseudo_lock.c

index ec4e8a4cace4896ce6d425b2e32aaed3bb2fe512..3f5ecfd80d1ec30caa503c9b9168f351b5fd1f13 100644 (file)
@@ -44,7 +44,7 @@ static inline unsigned int brs_to(int idx)
 static __always_inline void set_debug_extn_cfg(u64 val)
 {
        /* bits[4:3] must always be set to 11b */
-       __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
+       native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
 }
 
 static __always_inline u64 get_debug_extn_cfg(void)
index 1c136f54651cca37cbda3e8a7a013d4fe71607e2..0174dd5483275d3127f86a50e0766eab9d730468 100644 (file)
@@ -214,7 +214,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline void native_apic_msr_eoi(void)
 {
-       __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+       native_wrmsrq(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK);
 }
 
 static inline u32 native_apic_msr_read(u32 reg)
index fbeb313ccad2bde42b557635503d2c12a98333d9..e5f95a17124fd271e8761920c97413070caf29b0 100644 (file)
@@ -127,10 +127,12 @@ static inline u64 native_read_msr_safe(u32 msr, int *err)
 static inline void notrace
 native_write_msr(u32 msr, u32 low, u32 high)
 {
-       __wrmsr(msr, low, high);
+       u64 val = (u64)high << 32 | low;
+
+       native_wrmsrq(msr, val);
 
        if (tracepoint_enabled(write_msr))
-               do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
+               do_trace_write_msr(msr, val, 0);
 }
 
 /* Can be uninlined because referenced by paravirt */
index 7b9908cb5d69dbbbf2d9c303e4877afd34841089..96db2fd03e665966958a33b515ac7f5f19d007d7 100644 (file)
@@ -1306,7 +1306,7 @@ static noinstr bool mce_check_crashing_cpu(void)
                }
 
                if (mcgstatus & MCG_STATUS_RIPV) {
-                       __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
+                       native_wrmsrq(MSR_IA32_MCG_STATUS, 0);
                        return true;
                }
        }
index 61d762555a7964806253ed9effab166a4470ce0c..6e5edd76086e5e19b4314d99d5ef46ba30d55d7d 100644 (file)
@@ -483,7 +483,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
         * cache.
         */
        saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
-       __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+       native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
        closid_p = this_cpu_read(pqr_state.cur_closid);
        rmid_p = this_cpu_read(pqr_state.cur_rmid);
        mem_r = plr->kmem;
@@ -495,7 +495,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
         * pseudo-locked followed by reading of kernel memory to load it
         * into the cache.
         */
-       __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+       native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
 
        /*
         * Cache was flushed earlier. Now access kernel memory to read it
@@ -532,7 +532,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
         * Critical section end: restore closid with capacity bitmask that
         * does not overlap with pseudo-locked region.
         */
-       __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
+       native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
 
        /* Re-enable the hardware prefetcher(s) */
        wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);