git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 14 Dec 2017 17:48:48 +0000 (18:48 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 14 Dec 2017 17:48:48 +0000 (18:48 +0100)
added patches:
ipmi-stop-timers-before-cleaning-up-the-module.patch
more-bio_map_user_iov-leak-fixes.patch
s390-always-save-and-restore-all-registers-on-context-switch.patch

queue-4.4/ipmi-stop-timers-before-cleaning-up-the-module.patch [new file with mode: 0644]
queue-4.4/more-bio_map_user_iov-leak-fixes.patch [new file with mode: 0644]
queue-4.4/s390-always-save-and-restore-all-registers-on-context-switch.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/ipmi-stop-timers-before-cleaning-up-the-module.patch b/queue-4.4/ipmi-stop-timers-before-cleaning-up-the-module.patch
new file mode 100644 (file)
index 0000000..1cef516
--- /dev/null
@@ -0,0 +1,262 @@
+From 4f7f5551a760eb0124267be65763008169db7087 Mon Sep 17 00:00:00 2001
+From: Masamitsu Yamazaki <m-yamazaki@ah.jp.nec.com>
+Date: Wed, 15 Nov 2017 07:33:14 +0000
+Subject: ipmi: Stop timers before cleaning up the module
+
+From: Masamitsu Yamazaki <m-yamazaki@ah.jp.nec.com>
+
+commit 4f7f5551a760eb0124267be65763008169db7087 upstream.
+
+System may crash after unloading ipmi_si.ko module
+because a timer may remain and fire after the module cleaned up resources.
+
+cleanup_one_si() contains the following processing.
+
+        /*
+         * Make sure that interrupts, the timer and the thread are
+         * stopped and will not run again.
+         */
+        if (to_clean->irq_cleanup)
+                to_clean->irq_cleanup(to_clean);
+        wait_for_timer_and_thread(to_clean);
+
+        /*
+         * Timeouts are stopped, now make sure the interrupts are off
+         * in the BMC.  Note that timers and CPU interrupts are off,
+         * so no need for locks.
+         */
+        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+                poll(to_clean);
+                schedule_timeout_uninterruptible(1);
+        }
+
+si_state changes as following in the while loop calling poll(to_clean).
+
+  SI_GETTING_MESSAGES
+    => SI_CHECKING_ENABLES
+     => SI_SETTING_ENABLES
+      => SI_GETTING_EVENTS
+       => SI_NORMAL
+
+As written in the code comments above,
+timers are expected to stop before the polling loop and not to run again.
+But the timer is set again in the following process
+when si_state becomes SI_SETTING_ENABLES.
+
+  => poll
+     => smi_event_handler
+       => handle_transaction_done
+          // smi_info->si_state == SI_SETTING_ENABLES
+         => start_getting_events
+           => start_new_msg
+            => smi_mod_timer
+              => mod_timer
+
+As a result, before the timer set in start_new_msg() expires,
+the polling loop may see si_state becoming SI_NORMAL
+and the module clean-up finishes.
+
+For example, hard LOCKUP and panic occurred as following.
+smi_timeout was called after smi_event_handler,
+kcs_event and hangs at port_inb()
+trying to access I/O port after release.
+
+    [exception RIP: port_inb+19]
+    RIP: ffffffffc0473053  RSP: ffff88069fdc3d80  RFLAGS: 00000006
+    RAX: ffff8806800f8e00  RBX: ffff880682bd9400  RCX: 0000000000000000
+    RDX: 0000000000000ca3  RSI: 0000000000000ca3  RDI: ffff8806800f8e40
+    RBP: ffff88069fdc3d80   R8: ffffffff81d86dfc   R9: ffffffff81e36426
+    R10: 00000000000509f0  R11: 0000000000100000  R12: 0000000000000000
+    R13: 0000000000000000  R14: 0000000000000246  R15: ffff8806800f8e00
+    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0000
+ --- <NMI exception stack> ---
+
+To fix the problem I defined a flag, timer_can_start,
+as member of struct smi_info.
+The flag is enabled immediately after initializing the timer
+and disabled immediately before waiting for timer deletion.
+
+Fixes: 0cfec916e86d ("ipmi: Start the timer and thread on internal msgs")
+Signed-off-by: Yamazaki Masamitsu <m-yamazaki@ah.jp.nec.com>
+[Adjusted for recent changes in the driver.]
+[Some fairly major changes went into the IPMI driver in 4.15, so this
+ required a backport as the code had changed and moved to a different
+ file.  The 4.14 version of this patch moved some code under an
+ if statement and there was an API change causing it to not apply to
+ 4.4-4.6.]
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+This is for kernel version 4.4-4.6 only.  Code and API changes required
+backporting.  There is another version for 4.7-4.13 and another for
+4.14 coming, too.  Bug was introduced in 4.4.
+
+ drivers/char/ipmi/ipmi_si_intf.c |   44 ++++++++++++++++++++-------------------
+ 1 file changed, 23 insertions(+), 21 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -239,6 +239,9 @@ struct smi_info {
+       /* The timer for this si. */
+       struct timer_list   si_timer;
++      /* This flag is set, if the timer can be set */
++      bool                timer_can_start;
++
+       /* This flag is set, if the timer is running (timer_pending() isn't enough) */
+       bool                timer_running;
+@@ -414,6 +417,8 @@ static enum si_sm_result start_next_msg(
+ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+ {
++      if (!smi_info->timer_can_start)
++              return;
+       smi_info->last_timeout_jiffies = jiffies;
+       mod_timer(&smi_info->si_timer, new_val);
+       smi_info->timer_running = true;
+@@ -433,21 +438,18 @@ static void start_new_msg(struct smi_inf
+       smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+ }
+-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
++static void start_check_enables(struct smi_info *smi_info)
+ {
+       unsigned char msg[2];
+       msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+       msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+-      if (start_timer)
+-              start_new_msg(smi_info, msg, 2);
+-      else
+-              smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
++      start_new_msg(smi_info, msg, 2);
+       smi_info->si_state = SI_CHECKING_ENABLES;
+ }
+-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
++static void start_clear_flags(struct smi_info *smi_info)
+ {
+       unsigned char msg[3];
+@@ -456,10 +458,7 @@ static void start_clear_flags(struct smi
+       msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+       msg[2] = WDT_PRE_TIMEOUT_INT;
+-      if (start_timer)
+-              start_new_msg(smi_info, msg, 3);
+-      else
+-              smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
++      start_new_msg(smi_info, msg, 3);
+       smi_info->si_state = SI_CLEARING_FLAGS;
+ }
+@@ -494,11 +493,11 @@ static void start_getting_events(struct
+  * Note that we cannot just use disable_irq(), since the interrupt may
+  * be shared.
+  */
+-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
++static inline bool disable_si_irq(struct smi_info *smi_info)
+ {
+       if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+               smi_info->interrupt_disabled = true;
+-              start_check_enables(smi_info, start_timer);
++              start_check_enables(smi_info);
+               return true;
+       }
+       return false;
+@@ -508,7 +507,7 @@ static inline bool enable_si_irq(struct
+ {
+       if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+               smi_info->interrupt_disabled = false;
+-              start_check_enables(smi_info, true);
++              start_check_enables(smi_info);
+               return true;
+       }
+       return false;
+@@ -526,7 +525,7 @@ static struct ipmi_smi_msg *alloc_msg_ha
+       msg = ipmi_alloc_smi_msg();
+       if (!msg) {
+-              if (!disable_si_irq(smi_info, true))
++              if (!disable_si_irq(smi_info))
+                       smi_info->si_state = SI_NORMAL;
+       } else if (enable_si_irq(smi_info)) {
+               ipmi_free_smi_msg(msg);
+@@ -542,7 +541,7 @@ static void handle_flags(struct smi_info
+               /* Watchdog pre-timeout */
+               smi_inc_stat(smi_info, watchdog_pretimeouts);
+-              start_clear_flags(smi_info, true);
++              start_clear_flags(smi_info);
+               smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+               if (smi_info->intf)
+                       ipmi_smi_watchdog_pretimeout(smi_info->intf);
+@@ -925,7 +924,7 @@ static enum si_sm_result smi_event_handl
+                * disable and messages disabled.
+                */
+               if (smi_info->supports_event_msg_buff || smi_info->irq) {
+-                      start_check_enables(smi_info, true);
++                      start_check_enables(smi_info);
+               } else {
+                       smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+                       if (!smi_info->curr_msg)
+@@ -1232,6 +1231,7 @@ static int smi_start_processing(void
+       /* Set up the timer that drives the interface. */
+       setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
++      new_smi->timer_can_start = true;
+       smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+       /* Try to claim any interrupts. */
+@@ -3434,10 +3434,12 @@ static void check_for_broken_irqs(struct
+       check_set_rcv_irq(smi_info);
+ }
+-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
++static inline void stop_timer_and_thread(struct smi_info *smi_info)
+ {
+       if (smi_info->thread != NULL)
+               kthread_stop(smi_info->thread);
++
++      smi_info->timer_can_start = false;
+       if (smi_info->timer_running)
+               del_timer_sync(&smi_info->si_timer);
+ }
+@@ -3635,7 +3637,7 @@ static int try_smi_init(struct smi_info
+        * Start clearing the flags before we enable interrupts or the
+        * timer to avoid racing with the timer.
+        */
+-      start_clear_flags(new_smi, false);
++      start_clear_flags(new_smi);
+       /*
+        * IRQ is defined to be set when non-zero.  req_events will
+@@ -3713,7 +3715,7 @@ static int try_smi_init(struct smi_info
+       return 0;
+  out_err_stop_timer:
+-      wait_for_timer_and_thread(new_smi);
++      stop_timer_and_thread(new_smi);
+  out_err:
+       new_smi->interrupt_disabled = true;
+@@ -3919,7 +3921,7 @@ static void cleanup_one_si(struct smi_in
+        */
+       if (to_clean->irq_cleanup)
+               to_clean->irq_cleanup(to_clean);
+-      wait_for_timer_and_thread(to_clean);
++      stop_timer_and_thread(to_clean);
+       /*
+        * Timeouts are stopped, now make sure the interrupts are off
+@@ -3930,7 +3932,7 @@ static void cleanup_one_si(struct smi_in
+               poll(to_clean);
+               schedule_timeout_uninterruptible(1);
+       }
+-      disable_si_irq(to_clean, false);
++      disable_si_irq(to_clean);
+       while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+               poll(to_clean);
+               schedule_timeout_uninterruptible(1);
diff --git a/queue-4.4/more-bio_map_user_iov-leak-fixes.patch b/queue-4.4/more-bio_map_user_iov-leak-fixes.patch
new file mode 100644 (file)
index 0000000..cab75d9
--- /dev/null
@@ -0,0 +1,62 @@
+From 2b04e8f6bbb196cab4b232af0f8d48ff2c7a8058 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sat, 23 Sep 2017 15:51:23 -0400
+Subject: more bio_map_user_iov() leak fixes
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 2b04e8f6bbb196cab4b232af0f8d48ff2c7a8058 upstream.
+
+we need to take care of failure exit as well - pages already
+in bio should be dropped by analogue of bio_unmap_pages(),
+since their refcounts had been bumped only once per reference
+in bio.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+Fixed conflicts.
+Intended for v4.4.y. Does not apply to v3.18.y or older kernels.
+
+ block/bio.c |   14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1268,6 +1268,7 @@ struct bio *bio_map_user_iov(struct requ
+       int ret, offset;
+       struct iov_iter i;
+       struct iovec iov;
++      struct bio_vec *bvec;
+       iov_for_each(iov, i, *iter) {
+               unsigned long uaddr = (unsigned long) iov.iov_base;
+@@ -1312,7 +1313,12 @@ struct bio *bio_map_user_iov(struct requ
+               ret = get_user_pages_fast(uaddr, local_nr_pages,
+                               (iter->type & WRITE) != WRITE,
+                               &pages[cur_page]);
+-              if (ret < local_nr_pages) {
++              if (unlikely(ret < local_nr_pages)) {
++                      for (j = cur_page; j < page_limit; j++) {
++                              if (!pages[j])
++                                      break;
++                              put_page(pages[j]);
++                      }
+                       ret = -EFAULT;
+                       goto out_unmap;
+               }
+@@ -1374,10 +1380,8 @@ struct bio *bio_map_user_iov(struct requ
+       return bio;
+  out_unmap:
+-      for (j = 0; j < nr_pages; j++) {
+-              if (!pages[j])
+-                      break;
+-              page_cache_release(pages[j]);
++      bio_for_each_segment_all(bvec, bio, j) {
++              put_page(bvec->bv_page);
+       }
+  out:
+       kfree(pages);
diff --git a/queue-4.4/s390-always-save-and-restore-all-registers-on-context-switch.patch b/queue-4.4/s390-always-save-and-restore-all-registers-on-context-switch.patch
new file mode 100644 (file)
index 0000000..3d1a100
--- /dev/null
@@ -0,0 +1,58 @@
+From fbbd7f1a51965b50dd12924841da0d478f3da71b Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 20 Nov 2017 12:38:44 +0100
+Subject: s390: always save and restore all registers on context switch
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit fbbd7f1a51965b50dd12924841da0d478f3da71b upstream.
+
+The switch_to() macro has an optimization to avoid saving and
+restoring register contents that aren't needed for kernel threads.
+
+There is however the possibility that a kernel thread execve's a user
+space program. In such a case the execve'd process can partially see
+the contents of the previous process, which shouldn't be allowed.
+
+To avoid this, simply always save and restore register contents on
+context switch.
+
+Cc: <stable@vger.kernel.org> # v2.6.37+
+Fixes: fdb6d070effba ("switch_to: dont restore/save access & fpu regs for kernel threads")
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/switch_to.h |   19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+--- a/arch/s390/include/asm/switch_to.h
++++ b/arch/s390/include/asm/switch_to.h
+@@ -29,17 +29,16 @@ static inline void restore_access_regs(u
+ }
+ #define switch_to(prev,next,last) do {                                        \
+-      if (prev->mm) {                                                 \
+-              save_fpu_regs();                                        \
+-              save_access_regs(&prev->thread.acrs[0]);                \
+-              save_ri_cb(prev->thread.ri_cb);                         \
+-      }                                                               \
++      /* save_fpu_regs() sets the CIF_FPU flag, which enforces        \
++       * a restore of the floating point / vector registers as        \
++       * soon as the next task returns to user space                  \
++       */                                                             \
++      save_fpu_regs();                                                \
++      save_access_regs(&prev->thread.acrs[0]);                        \
++      save_ri_cb(prev->thread.ri_cb);                                 \
+       update_cr_regs(next);                                           \
+-      if (next->mm) {                                                 \
+-              set_cpu_flag(CIF_FPU);                                  \
+-              restore_access_regs(&next->thread.acrs[0]);             \
+-              restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
+-      }                                                               \
++      restore_access_regs(&next->thread.acrs[0]);                     \
++      restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);          \
+       prev = __switch_to(prev,next);                                  \
+ } while (0)
index 16eb42c020ec1a0bfe3deadbbd44e4ff352d487f..12ab41e2bdf27ba91d5912fc82a3666b6c0657ba 100644 (file)
@@ -92,3 +92,6 @@ ib-mlx5-assign-send-cq-and-recv-cq-of-umr-qp.patch
 afs-connect-up-the-cb.probeuuid.patch
 ipvlan-fix-ipv6-outbound-device.patch
 audit-ensure-that-audit-1-actually-enables-audit-for-pid-1.patch
+ipmi-stop-timers-before-cleaning-up-the-module.patch
+s390-always-save-and-restore-all-registers-on-context-switch.patch
+more-bio_map_user_iov-leak-fixes.patch