git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
x86/paravirt: Use XOR r32,r32 to clear register in pv_vcpu_is_preempted()
authorUros Bizjak <ubizjak@gmail.com>
Wed, 14 Jan 2026 21:18:15 +0000 (22:18 +0100)
committerBorislav Petkov (AMD) <bp@alien8.de>
Thu, 15 Jan 2026 10:44:29 +0000 (11:44 +0100)
x86_64 zero-extends 32-bit operations, so for 64-bit operands, XOR r32,r32 is
functionally equal to XOR r64,r64, but avoids a REX prefix byte when legacy
registers are used.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Acked-by: Alexey Makhalov <alexey.makhalov@broadcom.com>
Link: https://patch.msgid.link/20260114211948.74774-2-ubizjak@gmail.com
arch/x86/include/asm/paravirt-spinlock.h

index 458b888aba84b62867e525c1611f3e23a1b23744..7beffcb08ed6b31beeb68e96420312a2816a6b18 100644 (file)
@@ -45,7 +45,7 @@ static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
 static __always_inline bool pv_vcpu_is_preempted(long cpu)
 {
        return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
-                               "xor %%" _ASM_AX ", %%" _ASM_AX,
+                               "xor %%eax, %%eax",
                                ALT_NOT(X86_FEATURE_VCPUPREEMPT));
 }