5.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 27 Jun 2021 14:26:47 +0000 (16:26 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 27 Jun 2021 14:26:47 +0000 (16:26 +0200)
added patches:
i2c-robotfuzz-osif-fix-control-request-directions.patch
kthread-prevent-deadlock-when-kthread_mod_delayed_work-races-with-kthread_cancel_delayed_work_sync.patch
kthread_worker-split-code-for-canceling-the-delayed-work-timer.patch
kvm-do-not-allow-mapping-valid-but-non-reference-counted-pages.patch

queue-5.4/i2c-robotfuzz-osif-fix-control-request-directions.patch [new file with mode: 0644]
queue-5.4/kthread-prevent-deadlock-when-kthread_mod_delayed_work-races-with-kthread_cancel_delayed_work_sync.patch [new file with mode: 0644]
queue-5.4/kthread_worker-split-code-for-canceling-the-delayed-work-timer.patch [new file with mode: 0644]
queue-5.4/kvm-do-not-allow-mapping-valid-but-non-reference-counted-pages.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/i2c-robotfuzz-osif-fix-control-request-directions.patch b/queue-5.4/i2c-robotfuzz-osif-fix-control-request-directions.patch
new file mode 100644
index 0000000..dfaa7b6
--- /dev/null
+++ b/queue-5.4/i2c-robotfuzz-osif-fix-control-request-directions.patch
@@ -0,0 +1,50 @@
+From 4ca070ef0dd885616ef294d269a9bf8e3b258e1a Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 24 May 2021 11:09:12 +0200
+Subject: i2c: robotfuzz-osif: fix control-request directions
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 4ca070ef0dd885616ef294d269a9bf8e3b258e1a upstream.
+
+The direction of the pipe argument must match the request-type direction
+bit or control requests may fail depending on the host-controller-driver
+implementation.
+
+Control transfers without a data stage are treated as OUT requests by
+the USB stack and should be using usb_sndctrlpipe(). Failing to do so
+will now trigger a warning.
+
+Fix the OSIFI2C_SET_BIT_RATE and OSIFI2C_STOP requests which erroneously
+used the osif_usb_read() helper and set the IN direction bit.
+
+Reported-by: syzbot+9d7dadd15b8819d73f41@syzkaller.appspotmail.com
+Fixes: 83e53a8f120f ("i2c: Add bus driver for for OSIF USB i2c device.")
+Cc: stable@vger.kernel.org      # 3.14
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-robotfuzz-osif.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
++++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
+@@ -83,7 +83,7 @@ static int osif_xfer(struct i2c_adapter
+                       }
+               }
+-              ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
++              ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
+               if (ret) {
+                       dev_err(&adapter->dev, "failure sending STOP\n");
+                       return -EREMOTEIO;
+@@ -153,7 +153,7 @@ static int osif_probe(struct usb_interfa
+        * Set bus frequency. The frequency is:
+        * 120,000,000 / ( 16 + 2 * div * 4^prescale).
+        * Using dev = 52, prescale = 0 give 100KHz */
+-      ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
++      ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
+                           NULL, 0);
+       if (ret) {
+               dev_err(&interface->dev, "failure sending bit rate");
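
For background, a minimal sketch of the direction rule the patch above enforces; the function and request names below are illustrative, not taken from the driver. A vendor control request with no data stage is an OUT transfer, so the pipe and the bmRequestType direction bit must both say OUT:

#include <linux/usb.h>

/*
 * Illustrative only: send a zero-length vendor control request.
 * The pipe (usb_sndctrlpipe) and the request-type direction bit
 * (USB_DIR_OUT) must agree; pairing usb_rcvctrlpipe() with a request
 * that carries no data stage is the mismatch the fix removes, and
 * recent kernels warn about it.
 */
static int example_send_cmd(struct usb_device *udev, u8 request, u16 value)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       value, 0, NULL, 0, 1000);
}

A request that actually reads data back would instead pair usb_rcvctrlpipe() with USB_DIR_IN and a non-NULL buffer.
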
diff --git a/queue-5.4/kthread-prevent-deadlock-when-kthread_mod_delayed_work-races-with-kthread_cancel_delayed_work_sync.patch b/queue-5.4/kthread-prevent-deadlock-when-kthread_mod_delayed_work-races-with-kthread_cancel_delayed_work_sync.patch
new file mode 100644
index 0000000..4991aa4
--- /dev/null
+++ b/queue-5.4/kthread-prevent-deadlock-when-kthread_mod_delayed_work-races-with-kthread_cancel_delayed_work_sync.patch
@@ -0,0 +1,181 @@
+From 5fa54346caf67b4b1b10b1f390316ae466da4d53 Mon Sep 17 00:00:00 2001
+From: Petr Mladek <pmladek@suse.com>
+Date: Thu, 24 Jun 2021 18:39:48 -0700
+Subject: kthread: prevent deadlock when kthread_mod_delayed_work() races with kthread_cancel_delayed_work_sync()
+
+From: Petr Mladek <pmladek@suse.com>
+
+commit 5fa54346caf67b4b1b10b1f390316ae466da4d53 upstream.
+
+The system might hang with the following backtrace:
+
+       schedule+0x80/0x100
+       schedule_timeout+0x48/0x138
+       wait_for_common+0xa4/0x134
+       wait_for_completion+0x1c/0x2c
+       kthread_flush_work+0x114/0x1cc
+       kthread_cancel_work_sync.llvm.16514401384283632983+0xe8/0x144
+       kthread_cancel_delayed_work_sync+0x18/0x2c
+       xxxx_pm_notify+0xb0/0xd8
+       blocking_notifier_call_chain_robust+0x80/0x194
+       pm_notifier_call_chain_robust+0x28/0x4c
+       suspend_prepare+0x40/0x260
+       enter_state+0x80/0x3f4
+       pm_suspend+0x60/0xdc
+       state_store+0x108/0x144
+       kobj_attr_store+0x38/0x88
+       sysfs_kf_write+0x64/0xc0
+       kernfs_fop_write_iter+0x108/0x1d0
+       vfs_write+0x2f4/0x368
+       ksys_write+0x7c/0xec
+
+It is caused by the following race between kthread_mod_delayed_work()
+and kthread_cancel_delayed_work_sync():
+
+CPU0                           CPU1
+
+Context: Thread A              Context: Thread B
+
+kthread_mod_delayed_work()
+  spin_lock()
+  __kthread_cancel_work()
+     spin_unlock()
+     del_timer_sync()
+                               kthread_cancel_delayed_work_sync()
+                                 spin_lock()
+                                 __kthread_cancel_work()
+                                   spin_unlock()
+                                   del_timer_sync()
+                                   spin_lock()
+
+                                 work->canceling++
+                                 spin_unlock
+     spin_lock()
+   queue_delayed_work()
+     // dwork is put into the worker->delayed_work_list
+
+   spin_unlock()
+
+                                 kthread_flush_work()
+     // flush_work is put at the tail of the dwork
+
+                                   wait_for_completion()
+
+Context: IRQ
+
+  kthread_delayed_work_timer_fn()
+    spin_lock()
+    list_del_init(&work->node);
+    spin_unlock()
+
+BANG: flush_work is no longer linked and will never get processed.
+
+The problem is that kthread_mod_delayed_work() checks work->canceling
+flag before canceling the timer.
+
+A simple solution is to (re)check work->canceling after
+__kthread_cancel_work().  But then it is not clear what should be
+returned when __kthread_cancel_work() removed the work from the queue
+(list) and it can't queue it again with the new @delay.
+
+The return value might be used for reference counting.  The caller has
+to know whether a new work has been queued or an existing one was
+replaced.
+
+The proper solution is that kthread_mod_delayed_work() will remove the
+work from the queue (list) _only_ when work->canceling is not set.  The
+flag must be checked after the timer is stopped and the remaining
+operations can be done under worker->lock.
+
+Note that kthread_mod_delayed_work() could remove the timer and then
+bail out.  It is fine.  The other canceling caller needs to cancel the
+timer as well.  The important thing is that the queue (list)
+manipulation is done atomically under worker->lock.
+
+Link: https://lkml.kernel.org/r/20210610133051.15337-3-pmladek@suse.com
+Fixes: 9a6b06c8d9a220860468a ("kthread: allow to modify delayed kthread work")
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Reported-by: Martin Liu <liumartin@google.com>
+Cc: <jenhaochen@google.com>
+Cc: Minchan Kim <minchan@google.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kthread.c |   35 ++++++++++++++++++++++++-----------
+ 1 file changed, 24 insertions(+), 11 deletions(-)
+
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -1047,8 +1047,11 @@ static void kthread_cancel_delayed_work_
+ }
+ /*
+- * This function removes the work from the worker queue. Also it makes sure
+- * that it won't get queued later via the delayed work's timer.
++ * This function removes the work from the worker queue.
++ *
++ * It is called under worker->lock. The caller must make sure that
++ * the timer used by delayed work is not running, e.g. by calling
++ * kthread_cancel_delayed_work_timer().
+  *
+  * The work might still be in use when this function finishes. See the
+  * current_work proceed by the worker.
+@@ -1056,13 +1059,8 @@ static void kthread_cancel_delayed_work_
+  * Return: %true if @work was pending and successfully canceled,
+  *    %false if @work was not pending
+  */
+-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+-                                unsigned long *flags)
++static bool __kthread_cancel_work(struct kthread_work *work)
+ {
+-      /* Try to cancel the timer if exists. */
+-      if (is_dwork)
+-              kthread_cancel_delayed_work_timer(work, flags);
+-
+       /*
+        * Try to remove the work from a worker list. It might either
+        * be from worker->work_list or from worker->delayed_work_list.
+@@ -1115,11 +1113,23 @@ bool kthread_mod_delayed_work(struct kth
+       /* Work must not be used with >1 worker, see kthread_queue_work() */
+       WARN_ON_ONCE(work->worker != worker);
+-      /* Do not fight with another command that is canceling this work. */
++      /*
++       * Temporary cancel the work but do not fight with another command
++       * that is canceling the work as well.
++       *
++       * It is a bit tricky because of possible races with another
++       * mod_delayed_work() and cancel_delayed_work() callers.
++       *
++       * The timer must be canceled first because worker->lock is released
++       * when doing so. But the work can be removed from the queue (list)
++       * only when it can be queued again so that the return value can
++       * be used for reference counting.
++       */
++      kthread_cancel_delayed_work_timer(work, &flags);
+       if (work->canceling)
+               goto out;
++      ret = __kthread_cancel_work(work);
+-      ret = __kthread_cancel_work(work, true, &flags);
+ fast_queue:
+       __kthread_queue_delayed_work(worker, dwork, delay);
+ out:
+@@ -1141,7 +1151,10 @@ static bool __kthread_cancel_work_sync(s
+       /* Work must not be used with >1 worker, see kthread_queue_work(). */
+       WARN_ON_ONCE(work->worker != worker);
+-      ret = __kthread_cancel_work(work, is_dwork, &flags);
++      if (is_dwork)
++              kthread_cancel_delayed_work_timer(work, &flags);
++
++      ret = __kthread_cancel_work(work);
+       if (worker->current_work != work)
+               goto out_fast;
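
For background, a hedged sketch of the kthread-worker API whose internals the two kthread patches in this series fix; the worker, work item and callback names are made up for illustration. The deadlock needs two threads operating on the same delayed work: one re-arming it via kthread_mod_delayed_work(), the other tearing it down via kthread_cancel_delayed_work_sync():

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>

static struct kthread_worker *example_worker;
static struct kthread_delayed_work example_dwork;

static void example_fn(struct kthread_work *work)
{
	/* deferred work body */
}

static int example_setup(void)
{
	example_worker = kthread_create_worker(0, "example-worker");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);
	kthread_init_delayed_work(&example_dwork, example_fn);
	return 0;
}

/* Thread A: (re)arm the work -- the kthread_mod_delayed_work() path. */
static void example_rearm(void)
{
	kthread_mod_delayed_work(example_worker, &example_dwork,
				 msecs_to_jiffies(100));
}

/* Thread B: e.g. a PM notifier tearing things down -- the racing path. */
static void example_teardown(void)
{
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_destroy_worker(example_worker);
}

With the old code, the interleaving shown in the changelog could leave the canceling side's flush work on the delayed-work list after the timer callback had already run, so its wait_for_completion() never returned.
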
diff --git a/queue-5.4/kthread_worker-split-code-for-canceling-the-delayed-work-timer.patch b/queue-5.4/kthread_worker-split-code-for-canceling-the-delayed-work-timer.patch
new file mode 100644
index 0000000..f0e2be7
--- /dev/null
+++ b/queue-5.4/kthread_worker-split-code-for-canceling-the-delayed-work-timer.patch
@@ -0,0 +1,102 @@
+From 34b3d5344719d14fd2185b2d9459b3abcb8cf9d8 Mon Sep 17 00:00:00 2001
+From: Petr Mladek <pmladek@suse.com>
+Date: Thu, 24 Jun 2021 18:39:45 -0700
+Subject: kthread_worker: split code for canceling the delayed work timer
+
+From: Petr Mladek <pmladek@suse.com>
+
+commit 34b3d5344719d14fd2185b2d9459b3abcb8cf9d8 upstream.
+
+Patch series "kthread_worker: Fix race between kthread_mod_delayed_work()
+and kthread_cancel_delayed_work_sync()".
+
+This patchset fixes the race between kthread_mod_delayed_work() and
+kthread_cancel_delayed_work_sync() including proper return value
+handling.
+
+This patch (of 2):
+
+Simple code refactoring as a preparation step for fixing a race between
+kthread_mod_delayed_work() and kthread_cancel_delayed_work_sync().
+
+It does not modify the existing behavior.
+
+Link: https://lkml.kernel.org/r/20210610133051.15337-2-pmladek@suse.com
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Cc: <jenhaochen@google.com>
+Cc: Martin Liu <liumartin@google.com>
+Cc: Minchan Kim <minchan@google.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kthread.c |   46 +++++++++++++++++++++++++++++-----------------
+ 1 file changed, 29 insertions(+), 17 deletions(-)
+
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -1020,6 +1020,33 @@ void kthread_flush_work(struct kthread_w
+ EXPORT_SYMBOL_GPL(kthread_flush_work);
+ /*
++ * Make sure that the timer is neither set nor running and could
++ * not manipulate the work list_head any longer.
++ *
++ * The function is called under worker->lock. The lock is temporary
++ * released but the timer can't be set again in the meantime.
++ */
++static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
++                                            unsigned long *flags)
++{
++      struct kthread_delayed_work *dwork =
++              container_of(work, struct kthread_delayed_work, work);
++      struct kthread_worker *worker = work->worker;
++
++      /*
++       * del_timer_sync() must be called to make sure that the timer
++       * callback is not running. The lock must be temporary released
++       * to avoid a deadlock with the callback. In the meantime,
++       * any queuing is blocked by setting the canceling counter.
++       */
++      work->canceling++;
++      raw_spin_unlock_irqrestore(&worker->lock, *flags);
++      del_timer_sync(&dwork->timer);
++      raw_spin_lock_irqsave(&worker->lock, *flags);
++      work->canceling--;
++}
++
++/*
+  * This function removes the work from the worker queue. Also it makes sure
+  * that it won't get queued later via the delayed work's timer.
+  *
+@@ -1033,23 +1060,8 @@ static bool __kthread_cancel_work(struct
+                                 unsigned long *flags)
+ {
+       /* Try to cancel the timer if exists. */
+-      if (is_dwork) {
+-              struct kthread_delayed_work *dwork =
+-                      container_of(work, struct kthread_delayed_work, work);
+-              struct kthread_worker *worker = work->worker;
+-
+-              /*
+-               * del_timer_sync() must be called to make sure that the timer
+-               * callback is not running. The lock must be temporary released
+-               * to avoid a deadlock with the callback. In the meantime,
+-               * any queuing is blocked by setting the canceling counter.
+-               */
+-              work->canceling++;
+-              raw_spin_unlock_irqrestore(&worker->lock, *flags);
+-              del_timer_sync(&dwork->timer);
+-              raw_spin_lock_irqsave(&worker->lock, *flags);
+-              work->canceling--;
+-      }
++      if (is_dwork)
++              kthread_cancel_delayed_work_timer(work, flags);
+       /*
+        * Try to remove the work from a worker list. It might either
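
The helper factored out above follows a fairly generic pattern: del_timer_sync() waits for a running timer callback to finish, so it would deadlock if called while holding a lock the callback itself takes; the lock is therefore dropped around the call, and the per-work canceling counter stops anyone from re-arming the timer during that window. A stripped-down, hedged restatement of the pattern (not the kernel's code):

#include <linux/spinlock.h>
#include <linux/timer.h>

struct guarded_timer {
	struct timer_list timer;
	int canceling;	/* non-zero: re-arm attempts must be refused */
};

/* Called with *lock held; returns with it held again. */
static void guarded_cancel_timer(struct guarded_timer *gt,
				 spinlock_t *lock, unsigned long *flags)
{
	gt->canceling++;
	spin_unlock_irqrestore(lock, *flags);
	del_timer_sync(&gt->timer);	/* waits for a running callback */
	spin_lock_irqsave(lock, *flags);
	gt->canceling--;
}

Any path that arms the timer must check the counter under the same lock, which is what the follow-up fix above relies on.
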
diff --git a/queue-5.4/kvm-do-not-allow-mapping-valid-but-non-reference-counted-pages.patch b/queue-5.4/kvm-do-not-allow-mapping-valid-but-non-reference-counted-pages.patch
new file mode 100644
index 0000000..e622bd3
--- /dev/null
+++ b/queue-5.4/kvm-do-not-allow-mapping-valid-but-non-reference-counted-pages.patch
@@ -0,0 +1,70 @@
+From f8be156be163a052a067306417cd0ff679068c97 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Thu, 24 Jun 2021 08:29:04 -0400
+Subject: KVM: do not allow mapping valid but non-reference-counted pages
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit f8be156be163a052a067306417cd0ff679068c97 upstream.
+
+It's possible to create a region which maps valid but non-refcounted
+pages (e.g., tail pages of non-compound higher order allocations). These
+host pages can then be returned by gfn_to_page, gfn_to_pfn, etc., family
+of APIs, which take a reference to the page, which takes it from 0 to 1.
+When the reference is dropped, this will free the page incorrectly.
+
+Fix this by only taking a reference on valid pages if it was non-zero,
+which indicates it is participating in normal refcounting (and can be
+released with put_page).
+
+This addresses CVE-2021-22543.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Tested-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c |   19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1593,6 +1593,13 @@ static bool vma_is_valid(struct vm_area_
+       return true;
+ }
++static int kvm_try_get_pfn(kvm_pfn_t pfn)
++{
++      if (kvm_is_reserved_pfn(pfn))
++              return 1;
++      return get_page_unless_zero(pfn_to_page(pfn));
++}
++
+ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+                              unsigned long addr, bool *async,
+                              bool write_fault, bool *writable,
+@@ -1642,13 +1649,21 @@ static int hva_to_pfn_remapped(struct vm
+        * Whoever called remap_pfn_range is also going to call e.g.
+        * unmap_mapping_range before the underlying pages are freed,
+        * causing a call to our MMU notifier.
++       *
++       * Certain IO or PFNMAP mappings can be backed with valid
++       * struct pages, but be allocated without refcounting e.g.,
++       * tail pages of non-compound higher order allocations, which
++       * would then underflow the refcount when the caller does the
++       * required put_page. Don't allow those pages here.
+        */ 
+-      kvm_get_pfn(pfn);
++      if (!kvm_try_get_pfn(pfn))
++              r = -EFAULT;
+ out:
+       pte_unmap_unlock(ptep, ptl);
+       *p_pfn = pfn;
+-      return 0;
++
++      return r;
+ }
+ /*
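
The crux of the fix above is get_page_unless_zero(): unlike a plain get_page(), it refuses to take the very first reference on a page whose count is zero, which is exactly the state of the valid-but-non-refcounted pages (for example, tail pages of non-compound higher-order allocations) described in the changelog. A hedged sketch of the caller-side pattern, with illustrative names:

#include <linux/errno.h>
#include <linux/mm.h>

/*
 * Illustrative only: take a reference on a pfn's page before handing it
 * out, refusing pages that do not participate in refcounting.  A plain
 * get_page() would bump such a page from 0 to 1, and the caller's later
 * put_page() would then free it out from under its real owner.
 */
static int example_take_ref(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (!get_page_unless_zero(page))
		return -EFAULT;	/* mirrors the hva_to_pfn_remapped() change */
	return 0;
}
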
diff --git a/queue-5.4/series b/queue-5.4/series
index fa88c8ffd2e47d5f5af3f4e535ac5e7a8bbacf6a..7e49bce1b81ab8f1e8fe60c8fc4fdd6ebc004972 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -38,3 +38,7 @@ net-ll_temac-add-memory-barriers-for-tx-bd-access.patch
 net-ll_temac-avoid-ndo_start_xmit-returning-netdev_t.patch
 pinctrl-stm32-fix-the-reported-number-of-gpio-lines-.patch
 nilfs2-fix-memory-leak-in-nilfs_sysfs_delete_device_.patch
+kvm-do-not-allow-mapping-valid-but-non-reference-counted-pages.patch
+i2c-robotfuzz-osif-fix-control-request-directions.patch
+kthread_worker-split-code-for-canceling-the-delayed-work-timer.patch
+kthread-prevent-deadlock-when-kthread_mod_delayed_work-races-with-kthread_cancel_delayed_work_sync.patch