git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 30 Jun 2022 10:22:28 +0000 (12:22 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 30 Jun 2022 10:22:28 +0000 (12:22 +0200)
added patches:
x86-kvm-use-proper-asm-macros-for-kvm_vcpu_is_preempted.patch

queue-5.15/series
queue-5.15/x86-kvm-use-proper-asm-macros-for-kvm_vcpu_is_preempted.patch [new file with mode: 0644]

index 205e665900a0a435f259c880c7fa5d9c6c306359..db6a4b5b72951a5f47ff016c952bdda050a2b53f 100644 (file)
@@ -1,2 +1,3 @@
 tick-nohz-unexport-__init-annotated-tick_nohz_full_setup.patch
 clocksource-drivers-ixp4xx-remove-__init-from-ixp4xx_timer_setup.patch
+x86-kvm-use-proper-asm-macros-for-kvm_vcpu_is_preempted.patch
diff --git a/queue-5.15/x86-kvm-use-proper-asm-macros-for-kvm_vcpu_is_preempted.patch b/queue-5.15/x86-kvm-use-proper-asm-macros-for-kvm_vcpu_is_preempted.patch
new file mode 100644 (file)
index 0000000..a530dbe
--- /dev/null
@@ -0,0 +1,33 @@
+From 94d7e45bd6424b92149b452863df9ac962cf44c0 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 30 Jun 2022 12:19:47 +0200
+Subject: x86, kvm: use proper ASM macros for kvm_vcpu_is_preempted
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+The build rightfully complains about:
+       arch/x86/kernel/kvm.o: warning: objtool: __raw_callee_save___kvm_vcpu_is_preempted()+0x12: missing int3 after ret
+
+because the ASM_RET call is not being used correctly in kvm_vcpu_is_preempted().
+
+This was hand-fixed-up in the kvm merge commit a4cfff3f0f8c ("Merge branch
+'kvm-older-features' into HEAD") which of course can not be backported to
+stable kernels, so just fix this up directly instead.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/kvm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -948,7 +948,7 @@ asm(
+ "movq __per_cpu_offset(,%rdi,8), %rax;"
+ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+ "setne        %al;"
+-"ret;"
++ASM_RET
+ ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
+ ".popsection");