4.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 21 Jun 2023 18:45:09 +0000 (20:45 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 21 Jun 2023 18:45:09 +0000 (20:45 +0200)
added patches:
ipmi-make-the-smi-watcher-be-disabled-immediately-when-not-needed.patch
ipmi-move-message-error-checking-to-avoid-deadlock.patch
nilfs2-reject-devices-with-insufficient-block-count.patch
x86-purgatory-remove-pgo-flags.patch

queue-4.19/ipmi-make-the-smi-watcher-be-disabled-immediately-when-not-needed.patch [new file with mode: 0644]
queue-4.19/ipmi-move-message-error-checking-to-avoid-deadlock.patch [new file with mode: 0644]
queue-4.19/nilfs2-reject-devices-with-insufficient-block-count.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/x86-purgatory-remove-pgo-flags.patch [new file with mode: 0644]

diff --git a/queue-4.19/ipmi-make-the-smi-watcher-be-disabled-immediately-when-not-needed.patch b/queue-4.19/ipmi-make-the-smi-watcher-be-disabled-immediately-when-not-needed.patch
new file mode 100644
index 0000000..293019a
--- /dev/null
+++ b/queue-4.19/ipmi-make-the-smi-watcher-be-disabled-immediately-when-not-needed.patch
@@ -0,0 +1,447 @@
+From e1891cffd4c4896a899337a243273f0e23c028df Mon Sep 17 00:00:00 2001
+From: Corey Minyard <cminyard@mvista.com>
+Date: Wed, 24 Oct 2018 15:17:04 -0500
+Subject: ipmi: Make the smi watcher be disabled immediately when not needed
+
+From: Corey Minyard <cminyard@mvista.com>
+
+commit e1891cffd4c4896a899337a243273f0e23c028df upstream.
+
+The code that tells the lower layer to enable or disable watching for
+certain things was lazy about disabling: it waited until a timer tick
+to see whether a disable was necessary.  Not a really big deal, but it
+could be improved.
+
+Modify the code to enable and disable watching immediately, instead of
+doing it from the background timer.
+
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Kamlakant Patel <kamlakant.patel@cavium.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c |  164 +++++++++++++++++++-----------------
+ drivers/char/ipmi/ipmi_si_intf.c    |    2 
+ drivers/char/ipmi/ipmi_ssif.c       |    2 
+ include/linux/ipmi_smi.h            |   17 ---
+ 4 files changed, 96 insertions(+), 89 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -541,15 +541,20 @@ struct ipmi_smi {
+       atomic_t         event_waiters;
+       unsigned int     ticks_to_req_ev;
++      spinlock_t       watch_lock; /* For dealing with watch stuff below. */
++
+       /* How many users are waiting for commands? */
+-      atomic_t         command_waiters;
++      unsigned int     command_waiters;
+       /* How many users are waiting for watchdogs? */
+-      atomic_t         watchdog_waiters;
++      unsigned int     watchdog_waiters;
++
++      /* How many users are waiting for message responses? */
++      unsigned int     response_waiters;
+       /*
+        * Tells what the lower layer has last been asked to watch for,
+-       * messages and/or watchdogs.  Protected by xmit_msgs_lock.
++       * messages and/or watchdogs.  Protected by watch_lock.
+        */
+       unsigned int     last_watch_mask;
+@@ -945,6 +950,64 @@ static void deliver_err_response(struct
+       deliver_local_response(intf, msg);
+ }
++static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
++{
++      unsigned long iflags;
++
++      if (!intf->handlers->set_need_watch)
++              return;
++
++      spin_lock_irqsave(&intf->watch_lock, iflags);
++      if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
++              intf->response_waiters++;
++
++      if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
++              intf->watchdog_waiters++;
++
++      if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
++              intf->command_waiters++;
++
++      if ((intf->last_watch_mask & flags) != flags) {
++              intf->last_watch_mask |= flags;
++              intf->handlers->set_need_watch(intf->send_info,
++                                             intf->last_watch_mask);
++      }
++      spin_unlock_irqrestore(&intf->watch_lock, iflags);
++}
++
++static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
++{
++      unsigned long iflags;
++
++      if (!intf->handlers->set_need_watch)
++              return;
++
++      spin_lock_irqsave(&intf->watch_lock, iflags);
++      if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
++              intf->response_waiters--;
++
++      if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
++              intf->watchdog_waiters--;
++
++      if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
++              intf->command_waiters--;
++
++      flags = 0;
++      if (intf->response_waiters)
++              flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
++      if (intf->watchdog_waiters)
++              flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
++      if (intf->command_waiters)
++              flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
++
++      if (intf->last_watch_mask != flags) {
++              intf->last_watch_mask = flags;
++              intf->handlers->set_need_watch(intf->send_info,
++                                             intf->last_watch_mask);
++      }
++      spin_unlock_irqrestore(&intf->watch_lock, iflags);
++}
++
+ /*
+  * Find the next sequence number not being used and add the given
+  * message with the given timeout to the sequence table.  This must be
+@@ -988,6 +1051,7 @@ static int intf_next_seq(struct ipmi_smi
+               *seq = i;
+               *seqid = intf->seq_table[i].seqid;
+               intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
++              smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+               need_waiter(intf);
+       } else {
+               rv = -EAGAIN;
+@@ -1026,6 +1090,7 @@ static int intf_find_seq(struct ipmi_smi
+                               && (ipmi_addr_equal(addr, &msg->addr))) {
+                       *recv_msg = msg;
+                       intf->seq_table[seq].inuse = 0;
++                      smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+                       rv = 0;
+               }
+       }
+@@ -1087,6 +1152,7 @@ static int intf_err_seq(struct ipmi_smi
+               struct seq_table *ent = &intf->seq_table[seq];
+               ent->inuse = 0;
++              smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+               msg = ent->recv_msg;
+               rv = 0;
+       }
+@@ -1098,30 +1164,6 @@ static int intf_err_seq(struct ipmi_smi
+       return rv;
+ }
+-/* Must be called with xmit_msgs_lock held. */
+-static void smi_tell_to_watch(struct ipmi_smi *intf,
+-                            unsigned int flags,
+-                            struct ipmi_smi_msg *smi_msg)
+-{
+-      if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) {
+-              if (!smi_msg)
+-                      return;
+-
+-              if (!smi_msg->needs_response)
+-                      return;
+-      }
+-
+-      if (!intf->handlers->set_need_watch)
+-              return;
+-
+-      if ((intf->last_watch_mask & flags) == flags)
+-              return;
+-
+-      intf->last_watch_mask |= flags;
+-      intf->handlers->set_need_watch(intf->send_info,
+-                                     intf->last_watch_mask);
+-}
+-
+ static void free_user_work(struct work_struct *work)
+ {
+       struct ipmi_user *user = container_of(work, struct ipmi_user,
+@@ -1198,12 +1240,9 @@ int ipmi_create_user(unsigned int
+       spin_lock_irqsave(&intf->seq_lock, flags);
+       list_add_rcu(&new_user->link, &intf->users);
+       spin_unlock_irqrestore(&intf->seq_lock, flags);
+-      if (handler->ipmi_watchdog_pretimeout) {
++      if (handler->ipmi_watchdog_pretimeout)
+               /* User wants pretimeouts, so make sure to watch for them. */
+-              if (atomic_inc_return(&intf->watchdog_waiters) == 1)
+-                      smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG,
+-                                        NULL);
+-      }
++              smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
+       srcu_read_unlock(&ipmi_interfaces_srcu, index);
+       *user = new_user;
+       return 0;
+@@ -1276,7 +1315,7 @@ static void _ipmi_destroy_user(struct ip
+               user->handler->shutdown(user->handler_data);
+       if (user->handler->ipmi_watchdog_pretimeout)
+-              atomic_dec(&intf->watchdog_waiters);
++              smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
+       if (user->gets_events)
+               atomic_dec(&intf->event_waiters);
+@@ -1289,6 +1328,7 @@ static void _ipmi_destroy_user(struct ip
+               if (intf->seq_table[i].inuse
+                   && (intf->seq_table[i].recv_msg->user == user)) {
+                       intf->seq_table[i].inuse = 0;
++                      smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+                       ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+               }
+       }
+@@ -1634,8 +1674,7 @@ int ipmi_register_for_cmd(struct ipmi_us
+               goto out_unlock;
+       }
+-      if (atomic_inc_return(&intf->command_waiters) == 1)
+-              smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS, NULL);
++      smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
+       list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+@@ -1685,7 +1724,7 @@ int ipmi_unregister_for_cmd(struct ipmi_
+       synchronize_rcu();
+       release_ipmi_user(user, index);
+       while (rcvrs) {
+-              atomic_dec(&intf->command_waiters);
++              smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
+               rcvr = rcvrs;
+               rcvrs = rcvr->next;
+               kfree(rcvr);
+@@ -1813,8 +1852,6 @@ static void smi_send(struct ipmi_smi *in
+               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+       smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+-      smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, smi_msg);
+-
+       if (!run_to_completion)
+               spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+@@ -2014,9 +2051,6 @@ static int i_ipmi_req_ipmb(struct ipmi_s
+                               ipmb_seq, broadcast,
+                               source_address, source_lun);
+-              /* We will be getting a response in the BMC message queue. */
+-              smi_msg->needs_response = true;
+-
+               /*
+                * Copy the message into the recv message data, so we
+                * can retransmit it later if necessary.
+@@ -2204,7 +2238,6 @@ static int i_ipmi_request(struct ipmi_us
+                       goto out;
+               }
+       }
+-      smi_msg->needs_response = false;
+       rcu_read_lock();
+       if (intf->in_shutdown) {
+@@ -3425,9 +3458,8 @@ int ipmi_add_smi(struct module         *
+       INIT_LIST_HEAD(&intf->xmit_msgs);
+       INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+       spin_lock_init(&intf->events_lock);
++      spin_lock_init(&intf->watch_lock);
+       atomic_set(&intf->event_waiters, 0);
+-      atomic_set(&intf->watchdog_waiters, 0);
+-      atomic_set(&intf->command_waiters, 0);
+       intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+       INIT_LIST_HEAD(&intf->waiting_events);
+       intf->waiting_events_count = 0;
+@@ -4447,8 +4479,6 @@ static void smi_recv_tasklet(unsigned lo
+               }
+       }
+-      smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, newmsg);
+-
+       if (!run_to_completion)
+               spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+       if (newmsg)
+@@ -4576,7 +4606,7 @@ static void check_msg_timeout(struct ipm
+                             struct list_head *timeouts,
+                             unsigned long timeout_period,
+                             int slot, unsigned long *flags,
+-                            unsigned int *watch_mask)
++                            bool *need_timer)
+ {
+       struct ipmi_recv_msg *msg;
+@@ -4588,13 +4618,14 @@ static void check_msg_timeout(struct ipm
+       if (timeout_period < ent->timeout) {
+               ent->timeout -= timeout_period;
+-              *watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
++              *need_timer = true;
+               return;
+       }
+       if (ent->retries_left == 0) {
+               /* The message has used all its retries. */
+               ent->inuse = 0;
++              smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
+               msg = ent->recv_msg;
+               list_add_tail(&msg->link, timeouts);
+               if (ent->broadcast)
+@@ -4607,7 +4638,7 @@ static void check_msg_timeout(struct ipm
+               struct ipmi_smi_msg *smi_msg;
+               /* More retries, send again. */
+-              *watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
++              *need_timer = true;
+               /*
+                * Start with the max timer, set to normal timer after
+@@ -4652,20 +4683,20 @@ static void check_msg_timeout(struct ipm
+       }
+ }
+-static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
+-                                       unsigned long timeout_period)
++static bool ipmi_timeout_handler(struct ipmi_smi *intf,
++                               unsigned long timeout_period)
+ {
+       struct list_head     timeouts;
+       struct ipmi_recv_msg *msg, *msg2;
+       unsigned long        flags;
+       int                  i;
+-      unsigned int         watch_mask = 0;
++      bool                 need_timer = false;
+       if (!intf->bmc_registered) {
+               kref_get(&intf->refcount);
+               if (!schedule_work(&intf->bmc_reg_work)) {
+                       kref_put(&intf->refcount, intf_free);
+-                      watch_mask |= IPMI_WATCH_MASK_INTERNAL;
++                      need_timer = true;
+               }
+       }
+@@ -4685,7 +4716,7 @@ static unsigned int ipmi_timeout_handler
+       for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+               check_msg_timeout(intf, &intf->seq_table[i],
+                                 &timeouts, timeout_period, i,
+-                                &flags, &watch_mask);
++                                &flags, &need_timer);
+       spin_unlock_irqrestore(&intf->seq_lock, flags);
+       list_for_each_entry_safe(msg, msg2, &timeouts, link)
+@@ -4716,7 +4747,7 @@ static unsigned int ipmi_timeout_handler
+       tasklet_schedule(&intf->recv_tasklet);
+-      return watch_mask;
++      return need_timer;
+ }
+ static void ipmi_request_event(struct ipmi_smi *intf)
+@@ -4736,9 +4767,8 @@ static atomic_t stop_operation;
+ static void ipmi_timeout(struct timer_list *unused)
+ {
+       struct ipmi_smi *intf;
+-      unsigned int watch_mask = 0;
++      bool need_timer = false;
+       int index;
+-      unsigned long flags;
+       if (atomic_read(&stop_operation))
+               return;
+@@ -4751,28 +4781,14 @@ static void ipmi_timeout(struct timer_li
+                               ipmi_request_event(intf);
+                               intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+                       }
+-                      watch_mask |= IPMI_WATCH_MASK_INTERNAL;
++                      need_timer = true;
+               }
+-              if (atomic_read(&intf->watchdog_waiters))
+-                      watch_mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+-
+-              if (atomic_read(&intf->command_waiters))
+-                      watch_mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+-
+-              watch_mask |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+-
+-              spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+-              if (watch_mask != intf->last_watch_mask &&
+-                                      intf->handlers->set_need_watch)
+-                      intf->handlers->set_need_watch(intf->send_info,
+-                                                     watch_mask);
+-              intf->last_watch_mask = watch_mask;
+-              spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
++              need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+       }
+       srcu_read_unlock(&ipmi_interfaces_srcu, index);
+-      if (watch_mask)
++      if (need_timer)
+               mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+ }
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1079,7 +1079,7 @@ static void set_need_watch(void *send_in
+       unsigned long flags;
+       int enable;
+-      enable = !!(watch_mask & ~IPMI_WATCH_MASK_INTERNAL);
++      enable = !!watch_mask;
+       atomic_set(&smi_info->need_watch, enable);
+       spin_lock_irqsave(&smi_info->si_lock, flags);
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -1159,7 +1159,7 @@ static void ssif_set_need_watch(void *se
+       if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
+               timeout = SSIF_WATCH_MSG_TIMEOUT;
+-      else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
++      else if (watch_mask)
+               timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
+       flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+--- a/include/linux/ipmi_smi.h
++++ b/include/linux/ipmi_smi.h
+@@ -32,14 +32,11 @@ typedef struct ipmi_smi *ipmi_smi_t;
+ /*
+  * Flags for set_check_watch() below.  Tells if the SMI should be
+- * waiting for watchdog timeouts, commands and/or messages.  There is
+- * also an internal flag for the message handler, SMIs should ignore
+- * it.
++ * waiting for watchdog timeouts, commands and/or messages.
+  */
+-#define IPMI_WATCH_MASK_INTERNAL      (1 << 0)
+-#define IPMI_WATCH_MASK_CHECK_MESSAGES        (1 << 1)
+-#define IPMI_WATCH_MASK_CHECK_WATCHDOG        (1 << 2)
+-#define IPMI_WATCH_MASK_CHECK_COMMANDS        (1 << 3)
++#define IPMI_WATCH_MASK_CHECK_MESSAGES        (1 << 0)
++#define IPMI_WATCH_MASK_CHECK_WATCHDOG        (1 << 1)
++#define IPMI_WATCH_MASK_CHECK_COMMANDS        (1 << 2)
+ /*
+  * Messages to/from the lower layer.  The smi interface will take one
+@@ -67,12 +64,6 @@ struct ipmi_smi_msg {
+       unsigned char rsp[IPMI_MAX_MSG_LENGTH];
+       /*
+-       * There should be a response message coming back in the BMC
+-       * message queue.
+-       */
+-      bool needs_response;
+-
+-      /*
+        * Will be called when the system is done with the message
+        * (presumably to free it).
+        */
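
The heart of the change above is the new smi_add_watch()/smi_remove_watch() pair: one waiter count per watch reason, all under a dedicated spinlock, with the lower layer poked only when the combined mask actually changes. That is what lets a disable take effect immediately rather than on the next timer tick. Below is a minimal stand-alone sketch of this counted-watch pattern in plain C with pthreads; the names and the set_need_watch hook are illustrative stand-ins, not the kernel's API.

#include <pthread.h>
#include <stdio.h>

#define WATCH_MESSAGES  (1 << 0)
#define WATCH_WATCHDOG  (1 << 1)
#define WATCH_COMMANDS  (1 << 2)

struct watcher {
	pthread_mutex_t lock;                   /* plays the role of watch_lock */
	unsigned int msg_waiters, wdog_waiters, cmd_waiters;
	unsigned int last_mask;                 /* what the lower layer was told */
	void (*set_need_watch)(unsigned int);   /* lower-layer hook */
};

static void watch_add(struct watcher *w, unsigned int flags)
{
	pthread_mutex_lock(&w->lock);
	if (flags & WATCH_MESSAGES) w->msg_waiters++;
	if (flags & WATCH_WATCHDOG) w->wdog_waiters++;
	if (flags & WATCH_COMMANDS) w->cmd_waiters++;
	/* Tell the lower layer only when the mask gains new bits. */
	if ((w->last_mask & flags) != flags) {
		w->last_mask |= flags;
		w->set_need_watch(w->last_mask);
	}
	pthread_mutex_unlock(&w->lock);
}

static void watch_remove(struct watcher *w, unsigned int flags)
{
	pthread_mutex_lock(&w->lock);
	if (flags & WATCH_MESSAGES) w->msg_waiters--;
	if (flags & WATCH_WATCHDOG) w->wdog_waiters--;
	if (flags & WATCH_COMMANDS) w->cmd_waiters--;
	/* Recompute the mask from the counts and disable right away. */
	flags = 0;
	if (w->msg_waiters)  flags |= WATCH_MESSAGES;
	if (w->wdog_waiters) flags |= WATCH_WATCHDOG;
	if (w->cmd_waiters)  flags |= WATCH_COMMANDS;
	if (w->last_mask != flags) {
		w->last_mask = flags;
		w->set_need_watch(flags);
	}
	pthread_mutex_unlock(&w->lock);
}

static void hook(unsigned int mask) { printf("watch mask now %#x\n", mask); }

int main(void)
{
	static struct watcher w = { .lock = PTHREAD_MUTEX_INITIALIZER,
				    .set_need_watch = hook };

	watch_add(&w, WATCH_MESSAGES);      /* prints 0x1: enabled at once */
	watch_remove(&w, WATCH_MESSAGES);   /* prints 0: disabled at once, no timer */
	return 0;
}
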
diff --git a/queue-4.19/ipmi-move-message-error-checking-to-avoid-deadlock.patch b/queue-4.19/ipmi-move-message-error-checking-to-avoid-deadlock.patch
new file mode 100644
index 0000000..924eb3e
--- /dev/null
+++ b/queue-4.19/ipmi-move-message-error-checking-to-avoid-deadlock.patch
@@ -0,0 +1,190 @@
+From 383035211c79d4d98481a09ad429b31c7dbf22bd Mon Sep 17 00:00:00 2001
+From: Tony Camuso <tcamuso@redhat.com>
+Date: Thu, 22 Aug 2019 08:24:53 -0400
+Subject: ipmi: move message error checking to avoid deadlock
+
+From: Tony Camuso <tcamuso@redhat.com>
+
+commit 383035211c79d4d98481a09ad429b31c7dbf22bd upstream.
+
+V1->V2: in handle_one_rcv_msg, if data_size > 2, set requeue to zero and
+        goto out instead of calling ipmi_free_msg.
+        Kosuke Tatsukawa <tatsu@ab.jp.nec.com>
+
+In the source stack trace below, function set_need_watch tries to
+take out the same si_lock that was taken earlier by ipmi_thread.
+
+ipmi_thread() [drivers/char/ipmi/ipmi_si_intf.c:995]
+ smi_event_handler() [drivers/char/ipmi/ipmi_si_intf.c:765]
+  handle_transaction_done() [drivers/char/ipmi/ipmi_si_intf.c:555]
+   deliver_recv_msg() [drivers/char/ipmi/ipmi_si_intf.c:283]
+    ipmi_smi_msg_received() [drivers/char/ipmi/ipmi_msghandler.c:4503]
+     intf_err_seq() [drivers/char/ipmi/ipmi_msghandler.c:1149]
+      smi_remove_watch() [drivers/char/ipmi/ipmi_msghandler.c:999]
+       set_need_watch() [drivers/char/ipmi/ipmi_si_intf.c:1066]
+
+Upstream commit e1891cffd4c4896a899337a243273f0e23c028df adds code to
+ipmi_smi_msg_received() to call smi_remove_watch() via intf_err_seq()
+and this seems to be causing the deadlock.
+
+commit e1891cffd4c4896a899337a243273f0e23c028df
+Author: Corey Minyard <cminyard@mvista.com>
+Date:   Wed Oct 24 15:17:04 2018 -0500
+    ipmi: Make the smi watcher be disabled immediately when not needed
+
+The fix is to put all messages in the queue and move the message
+checking code out of ipmi_smi_msg_received and into handle_one_recv_msg,
+so that the checking happens after ipmi_thread has released its
+locks.
+
+Additionally, Kosuke Tatsukawa <tatsu@ab.jp.nec.com> reported that
+handle_new_recv_msgs calls ipmi_free_msg when handle_one_rcv_msg returns
+zero, so the call to ipmi_free_msg in handle_one_rcv_msg introduced
+another panic when "ipmitool sensor list" was run in a loop.  He
+submitted this part of the patch:
+
++free_msg:
++               requeue = 0;
++               goto out;
+
+Reported-by: Osamu Samukawa <osa-samukawa@tg.jp.nec.com>
+Characterized-by: Kosuke Tatsukawa <tatsu@ab.jp.nec.com>
+Signed-off-by: Tony Camuso <tcamuso@redhat.com>
+Fixes: e1891cffd4c4 ("ipmi: Make the smi watcher be disabled immediately when not needed")
+Cc: stable@vger.kernel.org # 5.1
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c |  114 ++++++++++++++++++------------------
+ 1 file changed, 57 insertions(+), 57 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4239,7 +4239,53 @@ static int handle_one_recv_msg(struct ip
+       int chan;
+       ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
+-      if (msg->rsp_size < 2) {
++
++      if ((msg->data_size >= 2)
++          && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
++          && (msg->data[1] == IPMI_SEND_MSG_CMD)
++          && (msg->user_data == NULL)) {
++
++              if (intf->in_shutdown)
++                      goto free_msg;
++
++              /*
++               * This is the local response to a command send, start
++               * the timer for these.  The user_data will not be
++               * NULL if this is a response send, and we will let
++               * response sends just go through.
++               */
++
++              /*
++               * Check for errors, if we get certain errors (ones
++               * that mean basically we can try again later), we
++               * ignore them and start the timer.  Otherwise we
++               * report the error immediately.
++               */
++              if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
++                  && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
++                  && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
++                  && (msg->rsp[2] != IPMI_BUS_ERR)
++                  && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
++                      int ch = msg->rsp[3] & 0xf;
++                      struct ipmi_channel *chans;
++
++                      /* Got an error sending the message, handle it. */
++
++                      chans = READ_ONCE(intf->channel_list)->c;
++                      if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
++                          || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
++                              ipmi_inc_stat(intf, sent_lan_command_errs);
++                      else
++                              ipmi_inc_stat(intf, sent_ipmb_command_errs);
++                      intf_err_seq(intf, msg->msgid, msg->rsp[2]);
++              } else
++                      /* The message was sent, start the timer. */
++                      intf_start_seq_timer(intf, msg->msgid);
++free_msg:
++              requeue = 0;
++              goto out;
++
++      } else if (msg->rsp_size < 2) {
+               /* Message is too small to be correct. */
+               dev_warn(intf->si_dev,
+                        PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
+@@ -4496,62 +4542,16 @@ void ipmi_smi_msg_received(struct ipmi_s
+       unsigned long flags = 0; /* keep us warning-free. */
+       int run_to_completion = intf->run_to_completion;
+-      if ((msg->data_size >= 2)
+-          && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+-          && (msg->data[1] == IPMI_SEND_MSG_CMD)
+-          && (msg->user_data == NULL)) {
+-
+-              if (intf->in_shutdown)
+-                      goto free_msg;
+-
+-              /*
+-               * This is the local response to a command send, start
+-               * the timer for these.  The user_data will not be
+-               * NULL if this is a response send, and we will let
+-               * response sends just go through.
+-               */
+-
+-              /*
+-               * Check for errors, if we get certain errors (ones
+-               * that mean basically we can try again later), we
+-               * ignore them and start the timer.  Otherwise we
+-               * report the error immediately.
+-               */
+-              if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+-                  && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+-                  && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+-                  && (msg->rsp[2] != IPMI_BUS_ERR)
+-                  && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+-                      int ch = msg->rsp[3] & 0xf;
+-                      struct ipmi_channel *chans;
+-
+-                      /* Got an error sending the message, handle it. */
+-
+-                      chans = READ_ONCE(intf->channel_list)->c;
+-                      if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+-                          || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+-                              ipmi_inc_stat(intf, sent_lan_command_errs);
+-                      else
+-                              ipmi_inc_stat(intf, sent_ipmb_command_errs);
+-                      intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+-              } else
+-                      /* The message was sent, start the timer. */
+-                      intf_start_seq_timer(intf, msg->msgid);
+-
+-free_msg:
+-              ipmi_free_smi_msg(msg);
+-      } else {
+-              /*
+-               * To preserve message order, we keep a queue and deliver from
+-               * a tasklet.
+-               */
+-              if (!run_to_completion)
+-                      spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+-              list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+-              if (!run_to_completion)
+-                      spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+-                                             flags);
+-      }
++      /*
++       * To preserve message order, we keep a queue and deliver from
++       * a tasklet.
++       */
++      if (!run_to_completion)
++              spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
++      list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
++      if (!run_to_completion)
++              spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
++                                     flags);
+       if (!run_to_completion)
+               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
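
The trace in the changelog is a textbook self-deadlock: deliver_recv_msg() is reached with si_lock held, and the smi_remove_watch() path added by e1891cffd4c4 calls back into set_need_watch(), which takes si_lock a second time. The fix keeps only the enqueue under the lock and moves the error checking to where the lock is no longer held. Here is a compressed sketch of the buggy shape and of the fixed shape; every name is an illustrative stand-in, not the driver's actual code.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t si_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_need_watch(void)
{
	pthread_mutex_lock(&si_lock);   /* second acquisition: hangs forever */
	pthread_mutex_unlock(&si_lock);
}

/* Buggy shape: the message is checked (and may re-enter the
 * subsystem) while si_lock is still held by the caller. */
static void msg_received_buggy(void)
{
	pthread_mutex_lock(&si_lock);
	set_need_watch();               /* deadlock on a non-recursive lock */
	pthread_mutex_unlock(&si_lock);
}

/* Fixed shape: under the lock, only queue the message ... */
struct msg { struct msg *next; };
static struct msg *pending;

static void msg_received_fixed(struct msg *m)
{
	pthread_mutex_lock(&si_lock);
	m->next = pending;
	pending = m;
	pthread_mutex_unlock(&si_lock);
}

/* ... and do the checking later, after the lock is dropped. */
static void process_pending(void)
{
	struct msg *list;

	pthread_mutex_lock(&si_lock);
	list = pending;                 /* detach the queue under the lock */
	pending = NULL;
	pthread_mutex_unlock(&si_lock);

	while (list) {
		struct msg *m = list;
		list = m->next;
		set_need_watch();       /* safe: si_lock is not held here */
	}
}

int main(void)
{
	struct msg m = { NULL };

	(void)msg_received_buggy;       /* shown for contrast only; never call it */
	msg_received_fixed(&m);
	process_pending();
	return 0;
}
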
diff --git a/queue-4.19/nilfs2-reject-devices-with-insufficient-block-count.patch b/queue-4.19/nilfs2-reject-devices-with-insufficient-block-count.patch
new file mode 100644
index 0000000..6f3c07c
--- /dev/null
+++ b/queue-4.19/nilfs2-reject-devices-with-insufficient-block-count.patch
@@ -0,0 +1,105 @@
+From 92c5d1b860e9581d64baca76779576c0ab0d943d Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 26 May 2023 11:13:32 +0900
+Subject: nilfs2: reject devices with insufficient block count
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 92c5d1b860e9581d64baca76779576c0ab0d943d upstream.
+
+The current sanity check for nilfs2 geometry information lacks checks for
+the number of segments stored in superblocks, so even for device images
+that have been destructively truncated or have an unusually high number of
+segments, the mount operation may succeed.
+
+This causes out-of-bounds block I/O on file system block reads or log
+writes to the segments, the latter in particular causing
+"a_ops->writepages" to repeatedly fail and, in turn, sync_inodes_sb()
+to hang.
+
+Fix this issue by checking the number of segments stored in the superblock
+and avoiding mounting devices that can cause out-of-bounds accesses.  To
+eliminate the possibility of overflow when calculating the number of
+blocks required for the device from the number of segments, this also adds
+a helper function to calculate the upper bound on the number of segments
+and inserts a check using it.
+
+Link: https://lkml.kernel.org/r/20230526021332.3431-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+7d50f1e54a12ba3aeae2@syzkaller.appspotmail.com
+  Link: https://syzkaller.appspot.com/bug?extid=7d50f1e54a12ba3aeae2
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/the_nilfs.c |   44 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 43 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -375,6 +375,18 @@ unsigned long nilfs_nrsvsegs(struct the_
+                                 100));
+ }
++/**
++ * nilfs_max_segment_count - calculate the maximum number of segments
++ * @nilfs: nilfs object
++ */
++static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
++{
++      u64 max_count = U64_MAX;
++
++      do_div(max_count, nilfs->ns_blocks_per_segment);
++      return min_t(u64, max_count, ULONG_MAX);
++}
++
+ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
+ {
+       nilfs->ns_nsegments = nsegs;
+@@ -384,6 +396,8 @@ void nilfs_set_nsegments(struct the_nilf
+ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+                                  struct nilfs_super_block *sbp)
+ {
++      u64 nsegments, nblocks;
++
+       if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
+               nilfs_msg(nilfs->ns_sb, KERN_ERR,
+                         "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
+@@ -430,7 +444,35 @@ static int nilfs_store_disk_layout(struc
+               return -EINVAL;
+       }
+-      nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
++      nsegments = le64_to_cpu(sbp->s_nsegments);
++      if (nsegments > nilfs_max_segment_count(nilfs)) {
++              nilfs_msg(nilfs->ns_sb, KERN_ERR,
++                        "segment count %llu exceeds upper limit (%llu segments)",
++                        (unsigned long long)nsegments,
++                        (unsigned long long)nilfs_max_segment_count(nilfs));
++              return -EINVAL;
++      }
++
++      nblocks = (u64)i_size_read(nilfs->ns_sb->s_bdev->bd_inode) >>
++              nilfs->ns_sb->s_blocksize_bits;
++      if (nblocks) {
++              u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
++              /*
++               * To avoid failing to mount early device images without a
++               * second superblock, exclude that block count from the
++               * "min_block_count" calculation.
++               */
++
++              if (nblocks < min_block_count) {
++                      nilfs_msg(nilfs->ns_sb, KERN_ERR,
++                                "total number of segment blocks %llu exceeds device size (%llu blocks)",
++                                (unsigned long long)min_block_count,
++                                (unsigned long long)nblocks);
++                      return -EINVAL;
++              }
++      }
++
++      nilfs_set_nsegments(nilfs, nsegments);
+       nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
+       return 0;
+ }
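
The guard added above works by dividing before ever multiplying: any segment count no larger than U64_MAX / ns_blocks_per_segment can be multiplied by the segment size without wrapping, so the min_block_count computation that follows is safe. A user-space sketch of the same idiom follows; the helper names are hypothetical, and the kernel version uses do_div() and min_t() instead.

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

/* Largest segment count whose total block count still fits in a u64,
 * clamped to ULONG_MAX as in the kernel helper.  Assumes
 * blocks_per_segment != 0, which the caller has validated earlier. */
static uint64_t max_segment_count(uint64_t blocks_per_segment)
{
	uint64_t max = UINT64_MAX / blocks_per_segment;

	return max < ULONG_MAX ? max : ULONG_MAX;
}

static int check_geometry(uint64_t nsegments, uint64_t blocks_per_segment,
			  uint64_t device_blocks)
{
	if (nsegments > max_segment_count(blocks_per_segment))
		return -1;      /* the multiplication below would overflow */

	/* Safe now: the product cannot wrap around. */
	if (device_blocks && device_blocks < nsegments * blocks_per_segment)
		return -1;      /* device too small for the claimed segments */

	return 0;
}

int main(void)
{
	/* 10 segments of 2048 blocks need 20480 blocks on the device. */
	printf("%d\n", check_geometry(10, 2048, 4096));    /* -1: truncated image */
	printf("%d\n", check_geometry(10, 2048, 20480));   /*  0: accepted */
	return 0;
}
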
diff --git a/queue-4.19/series b/queue-4.19/series
index a9b4edb052942ba2047698f120efadee23b8ddc8..a796201a8bbe213fa644eb751ce9555a1ab7065d 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -2,3 +2,7 @@ serial-lantiq-change-ltq_w32_mask-to-asc_update_bits.patch
 serial-lantiq-use-readl-writel-instead-of-ltq_r32-lt.patch
 serial-lantiq-do-not-swap-register-read-writes.patch
 serial-lantiq-add-missing-interrupt-ack.patch
+nilfs2-reject-devices-with-insufficient-block-count.patch
+x86-purgatory-remove-pgo-flags.patch
+ipmi-make-the-smi-watcher-be-disabled-immediately-when-not-needed.patch
+ipmi-move-message-error-checking-to-avoid-deadlock.patch
diff --git a/queue-4.19/x86-purgatory-remove-pgo-flags.patch b/queue-4.19/x86-purgatory-remove-pgo-flags.patch
new file mode 100644
index 0000000..2da15bf
--- /dev/null
+++ b/queue-4.19/x86-purgatory-remove-pgo-flags.patch
@@ -0,0 +1,60 @@
+From 97b6b9cbba40a21c1d9a344d5c1991f8cfbf136e Mon Sep 17 00:00:00 2001
+From: Ricardo Ribalda <ribalda@chromium.org>
+Date: Fri, 19 May 2023 16:47:37 +0200
+Subject: x86/purgatory: remove PGO flags
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+commit 97b6b9cbba40a21c1d9a344d5c1991f8cfbf136e upstream.
+
+If profile-guided optimization is enabled, the purgatory ends up with
+multiple .text sections.  This is not supported by kexec and crashes the
+system.
+
+Link: https://lkml.kernel.org/r/20230321-kexec_clang16-v7-2-b05c520b7296@chromium.org
+Fixes: 930457057abe ("kernel/kexec_file.c: split up __kexec_load_puragory")
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Cc: <stable@vger.kernel.org>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: Palmer Dabbelt <palmer@rivosinc.com>
+Cc: Paul Walmsley <paul.walmsley@sifive.com>
+Cc: Philipp Rudo <prudo@redhat.com>
+Cc: Ross Zwisler <zwisler@google.com>
+Cc: Simon Horman <horms@kernel.org>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Rix <trix@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Ricardo Ribalda Delgado <ribalda@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/purgatory/Makefile |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -12,6 +12,11 @@ $(obj)/string.o: $(srctree)/arch/x86/boo
+ $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
+       $(call if_changed_rule,cc_o_c)
++# When profile-guided optimization is enabled, llvm emits two different
++# overlapping text sections, which is not supported by kexec. Remove profile
++# optimization flags.
++KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
++
+ LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
+ targets += purgatory.ro
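
For reference, $(filter-out patterns,text) in GNU make removes every word of text that matches any of the given %-patterns, so the added line strips exactly the two PGO flags and leaves the rest of KBUILD_CFLAGS intact. A tiny stand-alone illustration of the semantics (not kernel build code):

# Start with a flag list that contains both PGO variants.
CFLAGS := -O2 -fprofile-use=default.profdata -Wall -fprofile-sample-use=a.prof
# Drop every word matching either pattern; everything else survives.
CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(CFLAGS))

all:
	@echo $(CFLAGS)   # prints: -O2 -Wall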