git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Jan 2022 08:21:46 +0000 (09:21 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Jan 2022 08:21:46 +0000 (09:21 +0100)
added patches:
kvm-s390-clarify-sigp-orders-versus-stop-restart.patch
media-uvcvideo-fix-division-by-zero-at-stream-start.patch
orangefs-fix-the-size-of-a-memory-allocation-in-orangefs_bufmap_alloc.patch
perf-protect-perf_guest_cbs-with-rcu.patch
rtlwifi-rtl8192cu-fix-warning-when-calling-local_irq_restore-with-interrupts-enabled.patch
vfs-fs_context-fix-up-param-length-parsing-in-legacy_parse_param.patch

queue-5.4/kvm-s390-clarify-sigp-orders-versus-stop-restart.patch [new file with mode: 0644]
queue-5.4/media-uvcvideo-fix-division-by-zero-at-stream-start.patch [new file with mode: 0644]
queue-5.4/orangefs-fix-the-size-of-a-memory-allocation-in-orangefs_bufmap_alloc.patch [new file with mode: 0644]
queue-5.4/perf-protect-perf_guest_cbs-with-rcu.patch [new file with mode: 0644]
queue-5.4/rtlwifi-rtl8192cu-fix-warning-when-calling-local_irq_restore-with-interrupts-enabled.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/vfs-fs_context-fix-up-param-length-parsing-in-legacy_parse_param.patch [new file with mode: 0644]

diff --git a/queue-5.4/kvm-s390-clarify-sigp-orders-versus-stop-restart.patch b/queue-5.4/kvm-s390-clarify-sigp-orders-versus-stop-restart.patch
new file mode 100644 (file)
index 0000000..b89121a
--- /dev/null
@@ -0,0 +1,121 @@
+From 812de04661c4daa7ac385c0dfd62594540538034 Mon Sep 17 00:00:00 2001
+From: Eric Farman <farman@linux.ibm.com>
+Date: Mon, 13 Dec 2021 22:05:50 +0100
+Subject: KVM: s390: Clarify SIGP orders versus STOP/RESTART
+
+From: Eric Farman <farman@linux.ibm.com>
+
+commit 812de04661c4daa7ac385c0dfd62594540538034 upstream.
+
+With KVM_CAP_S390_USER_SIGP, there are only five Signal Processor
+orders (CONDITIONAL EMERGENCY SIGNAL, EMERGENCY SIGNAL, EXTERNAL CALL,
+SENSE, and SENSE RUNNING STATUS) which are intended for frequent use
+and thus are processed in-kernel. The remainder are sent to userspace
+with the KVM_CAP_S390_USER_SIGP capability. Of those, three orders
+(RESTART, STOP, and STOP AND STORE STATUS) have the potential to
+inject work back into the kernel, and thus are asynchronous.
+
+Let's look for those pending IRQs when processing one of the in-kernel
+SIGP orders, and return BUSY (CC2) if one is in process. This is in
+agreement with the Principles of Operation, which states that only one
+order can be "active" on a CPU at a time.
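
[Editorial note: a condensed sketch of the check this patch adds in the sigp.c hunk below, for orientation only; the diff itself is authoritative.]

    /* Before handling any new non-reset order on the destination VCPU: */
    if (order_code != SIGP_INITIAL_CPU_RESET &&
        order_code != SIGP_CPU_RESET) {
            if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
                kvm_s390_is_restart_irq_pending(dst_vcpu))
                    return SIGP_CC_BUSY;    /* condition code 2 */
    }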
+
+Cc: stable@vger.kernel.org
+Suggested-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Eric Farman <farman@linux.ibm.com>
+Reviewed-by: Christian Borntraeger <borntraeger@linux.ibm.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Link: https://lore.kernel.org/r/20211213210550.856213-2-farman@linux.ibm.com
+[borntraeger@linux.ibm.com: add stable tag]
+Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kvm/interrupt.c |    7 +++++++
+ arch/s390/kvm/kvm-s390.c  |    9 +++++++--
+ arch/s390/kvm/kvm-s390.h  |    1 +
+ arch/s390/kvm/sigp.c      |   28 ++++++++++++++++++++++++++++
+ 4 files changed, 43 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1982,6 +1982,13 @@ int kvm_s390_is_stop_irq_pending(struct
+       return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ }
++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
++{
++      struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
++
++      return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
++}
++
+ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4205,10 +4205,15 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu
+       spin_lock(&vcpu->kvm->arch.start_stop_lock);
+       online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+-      /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
++      /*
++       * Set the VCPU to STOPPED and THEN clear the interrupt flag,
++       * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
++       * have been fully processed. This will ensure that the VCPU
++       * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
++       */
++      kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
+       kvm_s390_clear_stop_irq(vcpu);
+-      kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
+       __disable_ibs_on_vcpu(vcpu);
+       for (i = 0; i < online_vcpus; i++) {
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -373,6 +373,7 @@ void kvm_s390_destroy_adapters(struct kv
+ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
+ extern struct kvm_device_ops kvm_flic_ops;
+ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
+ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
+ int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
+                          void __user *buf, int len);
+--- a/arch/s390/kvm/sigp.c
++++ b/arch/s390/kvm/sigp.c
+@@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vc
+       if (!dst_vcpu)
+               return SIGP_CC_NOT_OPERATIONAL;
++      /*
++       * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
++       * are processed asynchronously. Until the affected VCPU finishes
++       * its work and calls back into KVM to clear the (RESTART or STOP)
++       * interrupt, we need to return any new non-reset orders "busy".
++       *
++       * This is important because a single VCPU could issue:
++       *  1) SIGP STOP $DESTINATION
++       *  2) SIGP SENSE $DESTINATION
++       *
++       * If the SIGP SENSE would not be rejected as "busy", it could
++       * return an incorrect answer as to whether the VCPU is STOPPED
++       * or OPERATING.
++       */
++      if (order_code != SIGP_INITIAL_CPU_RESET &&
++          order_code != SIGP_CPU_RESET) {
++              /*
++               * Lockless check. Both SIGP STOP and SIGP (RE)START
++               * properly synchronize everything while processing
++               * their orders, while the guest cannot observe a
++               * difference when issuing other orders from two
++               * different VCPUs.
++               */
++              if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
++                  kvm_s390_is_restart_irq_pending(dst_vcpu))
++                      return SIGP_CC_BUSY;
++      }
++
+       switch (order_code) {
+       case SIGP_SENSE:
+               vcpu->stat.instruction_sigp_sense++;
diff --git a/queue-5.4/media-uvcvideo-fix-division-by-zero-at-stream-start.patch b/queue-5.4/media-uvcvideo-fix-division-by-zero-at-stream-start.patch
new file mode 100644 (file)
index 0000000..b6bb34c
--- /dev/null
@@ -0,0 +1,43 @@
+From 8aa637bf6d70d2fb2ad4d708d8b9dd02b1c095df Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 26 Oct 2021 11:55:11 +0200
+Subject: media: uvcvideo: fix division by zero at stream start
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 8aa637bf6d70d2fb2ad4d708d8b9dd02b1c095df upstream.
+
+Add the missing bulk-endpoint max-packet sanity check to
+uvc_video_start_transfer() to avoid division by zero in
+uvc_alloc_urb_buffers() in case a malicious device has broken
+descriptors (or when doing descriptor fuzz testing).
+
+Note that USB core will reject URBs submitted for endpoints with zero
+wMaxPacketSize but that drivers doing packet-size calculations still
+need to handle this (cf. commit 2548288b4fb0 ("USB: Fix: Don't skip
+endpoint descriptors with maxpacket=0")).
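
[Editorial note: a simplified sketch of the failure mode; uvc_alloc_urb_buffers() is not part of this patch and the sketch only approximates it. The per-URB packet count is derived by dividing the buffer size by the endpoint's max packet size, so a descriptor advertising wMaxPacketSize == 0 divides by zero.]

    unsigned int psize = usb_endpoint_maxp(&ep->desc); /* 0 for a broken descriptor */
    unsigned int npackets = DIV_ROUND_UP(size, psize);  /* division by zero */

    /* The hunk below rejects such endpoints up front with -EIO. */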
+
+Fixes: c0efd232929c ("V4L/DVB (8145a): USB Video Class driver")
+Cc: stable@vger.kernel.org      # 2.6.26
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/uvc/uvc_video.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -1915,6 +1915,10 @@ static int uvc_video_start_transfer(stru
+               if (ep == NULL)
+                       return -EIO;
++              /* Reject broken descriptors. */
++              if (usb_endpoint_maxp(&ep->desc) == 0)
++                      return -EIO;
++
+               ret = uvc_init_video_bulk(stream, ep, gfp_flags);
+       }
diff --git a/queue-5.4/orangefs-fix-the-size-of-a-memory-allocation-in-orangefs_bufmap_alloc.patch b/queue-5.4/orangefs-fix-the-size-of-a-memory-allocation-in-orangefs_bufmap_alloc.patch
new file mode 100644 (file)
index 0000000..d167d70
--- /dev/null
@@ -0,0 +1,61 @@
+From 40a74870b2d1d3d44e13b3b73c6571dd34f5614d Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Mon, 27 Dec 2021 19:09:18 +0100
+Subject: orangefs: Fix the size of a memory allocation in orangefs_bufmap_alloc()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 40a74870b2d1d3d44e13b3b73c6571dd34f5614d upstream.
+
+'buffer_index_array' really looks like a bitmap. So it should be allocated
+as such.
+When kzalloc is called, a number of bytes is expected, but a number of
+longs is passed instead.
+
+In get(), if not enough memory is allocated, unallocated memory may be
+read or written.
+
+So use bitmap_zalloc() to safely allocate the correct memory size and
+avoid unexpected behavior.
+
+While at it, change the corresponding kfree() into bitmap_free() to keep
+the semantics consistent.
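
[Editorial note: a small standalone illustration of the size mismatch, assuming 64-bit longs and a made-up desc_count; DIV_ROUND_UP(desc_count, BITS_PER_LONG) is a count of longs, but kzalloc() takes a count of bytes, so the old allocation was sizeof(long) times too small.]

    #include <stdio.h>

    #define BITS_PER_LONG      (8 * sizeof(long))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long desc_count = 512;  /* hypothetical descriptor count */

            size_t longs = DIV_ROUND_UP(desc_count, BITS_PER_LONG); /* 8 longs  */
            size_t bytes = longs * sizeof(long);                    /* 64 bytes */

            /* The old code passed 'longs' (8) to kzalloc(), which expects bytes. */
            printf("old kzalloc size: %zu bytes, actually needed: %zu bytes\n",
                   longs, bytes);
            /* bitmap_zalloc(desc_count, GFP_KERNEL) does this conversion itself. */
            return 0;
    }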
+
+Fixes: ea2c9c9f6574 ("orangefs: bufmap rewrite")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Mike Marshall <hubcap@omnibond.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/orangefs/orangefs-bufmap.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/fs/orangefs/orangefs-bufmap.c
++++ b/fs/orangefs/orangefs-bufmap.c
+@@ -179,7 +179,7 @@ orangefs_bufmap_free(struct orangefs_buf
+ {
+       kfree(bufmap->page_array);
+       kfree(bufmap->desc_array);
+-      kfree(bufmap->buffer_index_array);
++      bitmap_free(bufmap->buffer_index_array);
+       kfree(bufmap);
+ }
+@@ -229,8 +229,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_de
+       bufmap->desc_size = user_desc->size;
+       bufmap->desc_shift = ilog2(bufmap->desc_size);
+-      bufmap->buffer_index_array =
+-              kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL);
++      bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
+       if (!bufmap->buffer_index_array)
+               goto out_free_bufmap;
+@@ -253,7 +252,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_de
+ out_free_desc_array:
+       kfree(bufmap->desc_array);
+ out_free_index_array:
+-      kfree(bufmap->buffer_index_array);
++      bitmap_free(bufmap->buffer_index_array);
+ out_free_bufmap:
+       kfree(bufmap);
+ out:
diff --git a/queue-5.4/perf-protect-perf_guest_cbs-with-rcu.patch b/queue-5.4/perf-protect-perf_guest_cbs-with-rcu.patch
new file mode 100644 (file)
index 0000000..0d39383
--- /dev/null
@@ -0,0 +1,432 @@
+From ff083a2d972f56bebfd82409ca62e5dfce950961 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 11 Nov 2021 02:07:22 +0000
+Subject: perf: Protect perf_guest_cbs with RCU
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit ff083a2d972f56bebfd82409ca62e5dfce950961 upstream.
+
+Protect perf_guest_cbs with RCU to fix multiple possible errors.  Luckily,
+all paths that read perf_guest_cbs already require RCU protection, e.g. to
+protect the callback chains, so only the direct perf_guest_cbs touchpoints
+need to be modified.
+
+Bug #1 is a simple lack of WRITE_ONCE/READ_ONCE behavior to ensure
+perf_guest_cbs isn't reloaded between a !NULL check and a dereference.
+Fixed via the READ_ONCE() in rcu_dereference().
+
+Bug #2 is that on weakly-ordered architectures, updates to the callbacks
+themselves are not guaranteed to be visible before the pointer is made
+visible to readers.  Fixed by the smp_store_release() in
+rcu_assign_pointer() when the new pointer is non-NULL.
+
+Bug #3 is that, because the callbacks are global, it's possible for
+readers to run in parallel with an unregister, and thus a module
+implementing the callbacks can be unloaded while readers are in flight,
+resulting in a use-after-free.  Fixed by a synchronize_rcu() call when
+unregistering callbacks.
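
[Editorial note: condensed, the three fixes map onto the standard RCU publish/read/retire pattern; this is a sketch for orientation only, the real changes are in the hunks below.]

    /* Publish: callback contents become visible before the pointer does (bug #2). */
    rcu_assign_pointer(perf_guest_cbs, cbs);

    /* Read (callers are already in RCU-protected context, e.g. NMI/PMI paths):
     * rcu_dereference() implies READ_ONCE(), so the pointer cannot be reloaded
     * between the NULL check and the dereferences (bug #1). */
    struct perf_guest_info_callbacks *guest_cbs = rcu_dereference(perf_guest_cbs);
    if (guest_cbs && guest_cbs->is_in_guest())
            misc |= guest_cbs->is_user_mode() ? PERF_RECORD_MISC_GUEST_USER
                                              : PERF_RECORD_MISC_GUEST_KERNEL;

    /* Retire: wait for in-flight readers before the module may unload (bug #3). */
    rcu_assign_pointer(perf_guest_cbs, NULL);
    synchronize_rcu();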
+
+Bug #1 escaped notice because it's extremely unlikely a compiler will
+reload perf_guest_cbs in this sequence.  perf_guest_cbs does get reloaded
+for future derefs, e.g. for ->is_user_mode(), but the ->is_in_guest()
+guard all but guarantees the consumer will win the race, e.g. to nullify
+perf_guest_cbs, KVM has to completely exit the guest and tear down
+all VMs before KVM starts its module unload / unregister sequence.  This
+also makes it all but impossible to encounter bug #3.
+
+Bug #2 has not been a problem because all architectures that register
+callbacks are strongly ordered and/or have a static set of callbacks.
+
+But with help, unloading kvm_intel can trigger bug #1 e.g. wrapping
+perf_guest_cbs with READ_ONCE in perf_misc_flags() while spamming
+kvm_intel module load/unload leads to:
+
+  BUG: kernel NULL pointer dereference, address: 0000000000000000
+  #PF: supervisor read access in kernel mode
+  #PF: error_code(0x0000) - not-present page
+  PGD 0 P4D 0
+  Oops: 0000 [#1] PREEMPT SMP
+  CPU: 6 PID: 1825 Comm: stress Not tainted 5.14.0-rc2+ #459
+  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+  RIP: 0010:perf_misc_flags+0x1c/0x70
+  Call Trace:
+   perf_prepare_sample+0x53/0x6b0
+   perf_event_output_forward+0x67/0x160
+   __perf_event_overflow+0x52/0xf0
+   handle_pmi_common+0x207/0x300
+   intel_pmu_handle_irq+0xcf/0x410
+   perf_event_nmi_handler+0x28/0x50
+   nmi_handle+0xc7/0x260
+   default_do_nmi+0x6b/0x170
+   exc_nmi+0x103/0x130
+   asm_exc_nmi+0x76/0xbf
+
+Fixes: 39447b386c84 ("perf: Enhance perf to allow for guest statistic collection from host")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20211111020738.2512932-2-seanjc@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/perf_callchain.c   |   17 +++++++++++------
+ arch/arm64/kernel/perf_callchain.c |   18 ++++++++++++------
+ arch/csky/kernel/perf_callchain.c  |    6 ++++--
+ arch/nds32/kernel/perf_event_cpu.c |   17 +++++++++++------
+ arch/riscv/kernel/perf_callchain.c |    7 +++++--
+ arch/x86/events/core.c             |   17 +++++++++++------
+ arch/x86/events/intel/core.c       |    9 ++++++---
+ include/linux/perf_event.h         |   13 ++++++++++++-
+ kernel/events/core.c               |   13 ++++++++++---
+ 9 files changed, 82 insertions(+), 35 deletions(-)
+
+--- a/arch/arm/kernel/perf_callchain.c
++++ b/arch/arm/kernel/perf_callchain.c
+@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user
+ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct frame_tail __user *tail;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
+ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe fr;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callch
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+       return instruction_pointer(regs);
+ }
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+--- a/arch/arm64/kernel/perf_callchain.c
++++ b/arch/arm64/kernel/perf_callchain.c
+@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_fram
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -147,9 +149,10 @@ static int callchain_trace(struct stackf
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe frame;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_c
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+       return instruction_pointer(regs);
+ }
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+--- a/arch/csky/kernel/perf_callchain.c
++++ b/arch/csky/kernel/perf_callchain.c
+@@ -86,10 +86,11 @@ static unsigned long user_backtrace(stru
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       unsigned long fp = 0;
+       /* C-SKY does not support virtualization. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
++      if (guest_cbs && guest_cbs->is_in_guest())
+               return;
+       fp = regs->regs[4];
+@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_cal
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe fr;
+       /* C-SKY does not support virtualization. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               pr_warn("C-SKY does not support perf in guest mode!");
+               return;
+       }
+--- a/arch/nds32/kernel/perf_event_cpu.c
++++ b/arch/nds32/kernel/perf_event_cpu.c
+@@ -1363,6 +1363,7 @@ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                   struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       unsigned long fp = 0;
+       unsigned long gp = 0;
+       unsigned long lp = 0;
+@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchai
+       leaf_fp = 0;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -1479,9 +1480,10 @@ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                     struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stackframe fr;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* We don't support guest os callchain now */
+               return;
+       }
+@@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callch
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
+       /* However, NDS32 does not support virtualization */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+       return instruction_pointer(regs);
+ }
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+       /* However, NDS32 does not support virtualization */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+--- a/arch/riscv/kernel/perf_callchain.c
++++ b/arch/riscv/kernel/perf_callchain.c
+@@ -60,10 +60,11 @@ static unsigned long user_backtrace(stru
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       unsigned long fp = 0;
+       /* RISC-V does not support perf in guest mode. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
++      if (guest_cbs && guest_cbs->is_in_guest())
+               return;
+       fp = regs->s0;
+@@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                          struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
+       /* RISC-V does not support perf in guest mode. */
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               pr_warn("RISC-V does not support perf in guest mode!");
+               return;
+       }
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2366,10 +2366,11 @@ static bool perf_hw_regs(struct pt_regs
+ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct unwind_state state;
+       unsigned long addr;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest os callchain now */
+               return;
+       }
+@@ -2475,10 +2476,11 @@ perf_callchain_user32(struct pt_regs *re
+ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       struct stack_frame frame;
+       const unsigned long __user *fp;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++      if (guest_cbs && guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest os callchain now */
+               return;
+       }
+@@ -2562,18 +2564,21 @@ static unsigned long code_segment_base(s
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+-              return perf_guest_cbs->get_guest_ip();
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++      if (guest_cbs && guest_cbs->is_in_guest())
++              return guest_cbs->get_guest_ip();
+       return regs->ip + code_segment_base(regs);
+ }
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++      struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+       int misc = 0;
+-      if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+-              if (perf_guest_cbs->is_user_mode())
++      if (guest_cbs && guest_cbs->is_in_guest()) {
++              if (guest_cbs->is_user_mode())
+                       misc |= PERF_RECORD_MISC_GUEST_USER;
+               else
+                       misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2333,6 +2333,7 @@ static int handle_pmi_common(struct pt_r
+ {
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++      struct perf_guest_info_callbacks *guest_cbs;
+       int bit;
+       int handled = 0;
+@@ -2386,9 +2387,11 @@ static int handle_pmi_common(struct pt_r
+        */
+       if (__test_and_clear_bit(55, (unsigned long *)&status)) {
+               handled++;
+-              if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
+-                      perf_guest_cbs->handle_intel_pt_intr))
+-                      perf_guest_cbs->handle_intel_pt_intr();
++
++              guest_cbs = perf_get_guest_cbs();
++              if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
++                           guest_cbs->handle_intel_pt_intr))
++                      guest_cbs->handle_intel_pt_intr();
+               else
+                       intel_pt_interrupt();
+       }
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1175,7 +1175,18 @@ extern void perf_event_bpf_event(struct
+                                enum perf_bpf_event_type type,
+                                u16 flags);
+-extern struct perf_guest_info_callbacks *perf_guest_cbs;
++extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
++static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
++{
++      /*
++       * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
++       * the callbacks between a !NULL check and dereferences, to ensure
++       * pending stores/changes to the callback pointers are visible before a
++       * non-NULL perf_guest_cbs is visible to readers, and to prevent a
++       * module from unloading callbacks while readers are active.
++       */
++      return rcu_dereference(perf_guest_cbs);
++}
+ extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6045,18 +6045,25 @@ static void perf_pending_event(struct ir
+  * Later on, we might change it to a list if there is
+  * another virtualization implementation supporting the callbacks.
+  */
+-struct perf_guest_info_callbacks *perf_guest_cbs;
++struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+ int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+ {
+-      perf_guest_cbs = cbs;
++      if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
++              return -EBUSY;
++
++      rcu_assign_pointer(perf_guest_cbs, cbs);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
+ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+ {
+-      perf_guest_cbs = NULL;
++      if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
++              return -EINVAL;
++
++      rcu_assign_pointer(perf_guest_cbs, NULL);
++      synchronize_rcu();
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
diff --git a/queue-5.4/rtlwifi-rtl8192cu-fix-warning-when-calling-local_irq_restore-with-interrupts-enabled.patch b/queue-5.4/rtlwifi-rtl8192cu-fix-warning-when-calling-local_irq_restore-with-interrupts-enabled.patch
new file mode 100644 (file)
index 0000000..4cabdaf
--- /dev/null
@@ -0,0 +1,45 @@
+From 8b144dedb928e4e2f433a328d58f44c3c098d63e Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Wed, 15 Dec 2021 11:11:05 -0600
+Subject: rtlwifi: rtl8192cu: Fix WARNING when calling local_irq_restore() with interrupts enabled
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit 8b144dedb928e4e2f433a328d58f44c3c098d63e upstream.
+
+Syzbot reports the following WARNING:
+
+raw_local_irq_restore() called with IRQs enabled
+WARNING: CPU: 1 PID: 1206 at kernel/locking/irqflag-debug.c:10
+   warn_bogus_irq_restore+0x1d/0x20 kernel/locking/irqflag-debug.c:10
+
+Hardware initialization for the rtl8188cu can run for as long as 350 ms,
+and the routine may be called with interrupts disabled. To avoid locking
+the machine for this long, the current routine saves the interrupt flags
+and enables local interrupts. The problem is that it restores the flags
+at the end without disabling local interrupts first.
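
[Editorial note: roughly, the sequence in rtl92cu_hw_init() looks like the sketch below (simplified, not the full function). The debug check fires because the saved flags are restored while interrupts are still enabled; the one-line fix in the hunk below disables them again first.]

    unsigned long flags;

    local_save_flags(flags);        /* may be entered with IRQs disabled */
    local_irq_enable();             /* don't keep IRQs off for up to 350 ms */

    /* ... long-running hardware initialization ... */

    exit:
            local_irq_disable();            /* added by this patch */
            local_irq_restore(flags);       /* now consistent with the saved flags */
            return err;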
+
+This patch fixes commit a53268be0cb9 ("rtlwifi: rtl8192cu: Fix too long
+disable of IRQs").
+
+Reported-by: syzbot+cce1ee31614c171f5595@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Fixes: a53268be0cb9 ("rtlwifi: rtl8192cu: Fix too long disable of IRQs")
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20211215171105.20623-1-Larry.Finger@lwfinger.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw
+       _initpabias(hw);
+       rtl92c_dm_init(hw);
+ exit:
++      local_irq_disable();
+       local_irq_restore(flags);
+       return err;
+ }
diff --git a/queue-5.4/series b/queue-5.4/series
index f419018911481ede8093fe6d57f4fa2d67744f09..ed42b8be1b49dcb260378f4f313ac2b660c47c17 100644 (file)
@@ -1,2 +1,8 @@
 kbuild-add-kbuild_hostldflags-to-has_libelf-test.patch
 devtmpfs-regression-fix-reconfigure-on-each-mount.patch
+orangefs-fix-the-size-of-a-memory-allocation-in-orangefs_bufmap_alloc.patch
+vfs-fs_context-fix-up-param-length-parsing-in-legacy_parse_param.patch
+perf-protect-perf_guest_cbs-with-rcu.patch
+kvm-s390-clarify-sigp-orders-versus-stop-restart.patch
+media-uvcvideo-fix-division-by-zero-at-stream-start.patch
+rtlwifi-rtl8192cu-fix-warning-when-calling-local_irq_restore-with-interrupts-enabled.patch
diff --git a/queue-5.4/vfs-fs_context-fix-up-param-length-parsing-in-legacy_parse_param.patch b/queue-5.4/vfs-fs_context-fix-up-param-length-parsing-in-legacy_parse_param.patch
new file mode 100644 (file)
index 0000000..9613249
--- /dev/null
@@ -0,0 +1,37 @@
+From 722d94847de29310e8aa03fcbdb41fc92c521756 Mon Sep 17 00:00:00 2001
+From: Jamie Hill-Daniel <jamie@hill-daniel.co.uk>
+Date: Tue, 18 Jan 2022 08:06:04 +0100
+Subject: vfs: fs_context: fix up param length parsing in legacy_parse_param
+
+From: Jamie Hill-Daniel <jamie@hill-daniel.co.uk>
+
+commit 722d94847de29310e8aa03fcbdb41fc92c521756 upstream.
+
+The "PAGE_SIZE - 2 - size" calculation in legacy_parse_param() is an
+unsigned type so a large value of "size" results in a high positive
+value instead of a negative value as expected.  Fix this by getting rid
+of the subtraction.
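
[Editorial note: a standalone illustration of the wrap-around with made-up values, not kernel code. Once the accumulated size exceeds PAGE_SIZE - 2, the subtraction underflows to a huge unsigned value, the length check silently passes, and the subsequent copy overflows the one-page buffer; expressing the check as an addition keeps everything in range.]

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
            size_t size = PAGE_SIZE - 1;  /* options accumulated so far */
            size_t len  = 100;            /* length of the next option */

            /* Old check: 4094 - 4095 wraps to SIZE_MAX, so "too large" never hits. */
            if (len > PAGE_SIZE - 2 - size)
                    printf("old check: rejected\n");
            else
                    printf("old check: accepted despite overflow (limit wrapped to %zu)\n",
                           PAGE_SIZE - 2 - size);

            /* New check: no subtraction, no wrap -- the overflow is caught. */
            if (size + len + 2 > PAGE_SIZE)
                    printf("new check: rejected\n");
            return 0;
    }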
+
+Signed-off-by: Jamie Hill-Daniel <jamie@hill-daniel.co.uk>
+Signed-off-by: William Liu <willsroot@protonmail.com>
+Tested-by: Salvatore Bonaccorso <carnil@debian.org>
+Tested-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Acked-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fs_context.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -585,7 +585,7 @@ static int legacy_parse_param(struct fs_
+                             param->key);
+       }
+-      if (len > PAGE_SIZE - 2 - size)
++      if (size + len + 2 > PAGE_SIZE)
+               return invalf(fc, "VFS: Legacy: Cumulative options too large");
+       if (strchr(param->key, ',') ||
+           (param->type == fs_value_is_string &&