--- /dev/null
+From 62855527c9516c0831d62c5d5b35192538ac3764 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Mar 2023 15:59:32 +0900
+Subject: dm verity: fix error handling for check_at_most_once on FEC
+
+From: Yeongjin Gil <youngjin.gil@samsung.com>
+
+[ Upstream commit e8c5d45f82ce0c238a4817739892fe8897a3dcc3 ]
+
+In verity_end_io(), if bi_status is not BLK_STS_OK, it can be returned
+directly. But if FEC is configured, the data page should be corrected
+through verity_verify_io(), and the return value will be converted to
+blk_status and passed to verity_finish_io().
+
+However, when a bit is set in v->validated_blocks, verity_verify_io()
+skips verification regardless of an I/O error on the corresponding bio.
+In that case the I/O error is not returned properly, and as a result
+bad data can be read for the corresponding block.
+
+To fix this problem, when an I/O error occurs, do not skip verification
+even if the corresponding bit is set in v->validated_blocks.
+
+Fixes: 843f38d382b1 ("dm verity: add 'check_at_most_once' option to only validate hashes once")
+Cc: stable@vger.kernel.org
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Yeongjin Gil <youngjin.gil@samsung.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-verity-target.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 08a135f7ef431..d116495a3445e 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -478,7 +478,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ sector_t cur_block = io->block + b;
+ struct ahash_request *req = verity_io_hash_req(v, io);
+
+- if (v->validated_blocks &&
++ if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
+ likely(test_bit(cur_block, v->validated_blocks))) {
+ verity_bv_skip_block(v, io, &io->iter);
+ continue;
+--
+2.39.2
+
--- /dev/null
+From a1f9c2619550df502a352b6381051bf6d76774b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Sep 2021 09:26:42 +0000
+Subject: dm verity: skip redundant verity_handle_err() on I/O errors
+
+From: Akilesh Kailash <akailash@google.com>
+
+[ Upstream commit 2c0468e054c0adb660ac055fc396622ec7235df9 ]
+
+Without FEC, dm-verity won't call verity_handle_err() when I/O fails,
+but with FEC enabled, it currently does even if an I/O error has
+occurred.
+
+If there is an I/O error and FEC correction fails, return the error
+instead of calling verity_handle_err() again.
+
+Suggested-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Akilesh Kailash <akailash@google.com>
+Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Stable-dep-of: e8c5d45f82ce ("dm verity: fix error handling for check_at_most_once on FEC")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-verity-target.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 36945030520a9..08a135f7ef431 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -471,6 +471,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ struct bvec_iter start;
+ unsigned b;
+ struct crypto_wait wait;
++ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+
+ for (b = 0; b < io->n_blocks; b++) {
+ int r;
+@@ -525,9 +526,17 @@ static int verity_verify_io(struct dm_verity_io *io)
+ else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+ cur_block, NULL, &start) == 0)
+ continue;
+- else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+- cur_block))
+- return -EIO;
++ else {
++ if (bio->bi_status) {
++ /*
++ * Error correction failed; Just return error
++ */
++ return -EIO;
++ }
++ if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
++ cur_block))
++ return -EIO;
++ }
+ }
+
+ return 0;
+--
+2.39.2
+
--- /dev/null
+From 1a02ead1ee2f4e37a6fd675f44dcaaa107f99d62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Oct 2018 11:29:02 -0500
+Subject: ipmi: Fix how the lower layers are told to watch for messages
+
+From: Corey Minyard <cminyard@mvista.com>
+
+[ Upstream commit c65ea996595005be470fbfa16711deba414fd33b ]
+
+The IPMI driver has a mechanism to tell the lower layers it needs
+to watch for messages, commands, and watchdogs (so it doesn't
+needlessly poll). However, it needed an extension: a way to tell
+what is being waited for, so the lower layer can set its timeout
+appropriately.
+
+The update to the lower layer was also being done once a second
+at best because it was done in the main timeout handler. However,
+if a command is sent and a response message is expected back,
+the watch needs to be started immediately. So modify the code to
+update immediately when the watch needs to be enabled. Disabling
+is still done lazily.
+
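+As an illustration of the intended behavior (this simply mirrors the
+ipmi_ssif changes below), a lower layer can pick its polling interval
+from the mask it is handed:
+
+    if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
+        timeout = SSIF_WATCH_MSG_TIMEOUT;       /* fast, a response is expected */
+    else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
+        timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;  /* slower, watchdog/commands only */
+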
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Kamlakant Patel <kamlakant.patel@cavium.com>
+Stable-dep-of: 6d2555cde291 ("ipmi: fix SSIF not responding under certain cond.")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c | 119 ++++++++++++++++++++--------
+ drivers/char/ipmi/ipmi_si_intf.c | 5 +-
+ drivers/char/ipmi/ipmi_ssif.c | 26 +++---
+ include/linux/ipmi_smi.h | 36 +++++++--
+ 4 files changed, 134 insertions(+), 52 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 4265e8d3e71c5..31cfa47d24984 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -536,9 +536,22 @@ struct ipmi_smi {
+ unsigned int waiting_events_count; /* How many events in queue? */
+ char delivering_events;
+ char event_msg_printed;
++
++ /* How many users are waiting for events? */
+ atomic_t event_waiters;
+ unsigned int ticks_to_req_ev;
+- int last_needs_timer;
++
++ /* How many users are waiting for commands? */
++ atomic_t command_waiters;
++
++ /* How many users are waiting for watchdogs? */
++ atomic_t watchdog_waiters;
++
++ /*
++ * Tells what the lower layer has last been asked to watch for,
++ * messages and/or watchdogs. Protected by xmit_msgs_lock.
++ */
++ unsigned int last_watch_mask;
+
+ /*
+ * The event receiver for my BMC, only really used at panic
+@@ -1085,6 +1098,29 @@ static int intf_err_seq(struct ipmi_smi *intf,
+ return rv;
+ }
+
++/* Must be called with xmit_msgs_lock held. */
++static void smi_tell_to_watch(struct ipmi_smi *intf,
++ unsigned int flags,
++ struct ipmi_smi_msg *smi_msg)
++{
++ if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) {
++ if (!smi_msg)
++ return;
++
++ if (!smi_msg->needs_response)
++ return;
++ }
++
++ if (!intf->handlers->set_need_watch)
++ return;
++
++ if ((intf->last_watch_mask & flags) == flags)
++ return;
++
++ intf->last_watch_mask |= flags;
++ intf->handlers->set_need_watch(intf->send_info,
++ intf->last_watch_mask);
++}
+
+ static void free_user_work(struct work_struct *work)
+ {
+@@ -1164,8 +1200,9 @@ int ipmi_create_user(unsigned int if_num,
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+ if (handler->ipmi_watchdog_pretimeout) {
+ /* User wants pretimeouts, so make sure to watch for them. */
+- if (atomic_inc_return(&intf->event_waiters) == 1)
+- need_waiter(intf);
++ if (atomic_inc_return(&intf->watchdog_waiters) == 1)
++ smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG,
++ NULL);
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ *user = new_user;
+@@ -1239,7 +1276,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
+ user->handler->shutdown(user->handler_data);
+
+ if (user->handler->ipmi_watchdog_pretimeout)
+- atomic_dec(&intf->event_waiters);
++ atomic_dec(&intf->watchdog_waiters);
+
+ if (user->gets_events)
+ atomic_dec(&intf->event_waiters);
+@@ -1597,8 +1634,8 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
+ goto out_unlock;
+ }
+
+- if (atomic_inc_return(&intf->event_waiters) == 1)
+- need_waiter(intf);
++ if (atomic_inc_return(&intf->command_waiters) == 1)
++ smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS, NULL);
+
+ list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
+
+@@ -1648,7 +1685,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
+ synchronize_rcu();
+ release_ipmi_user(user, index);
+ while (rcvrs) {
+- atomic_dec(&intf->event_waiters);
++ atomic_dec(&intf->command_waiters);
+ rcvr = rcvrs;
+ rcvrs = rcvr->next;
+ kfree(rcvr);
+@@ -1765,22 +1802,21 @@ static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
+ return smi_msg;
+ }
+
+-
+ static void smi_send(struct ipmi_smi *intf,
+ const struct ipmi_smi_handlers *handlers,
+ struct ipmi_smi_msg *smi_msg, int priority)
+ {
+ int run_to_completion = intf->run_to_completion;
++ unsigned long flags = 0;
+
+- if (run_to_completion) {
+- smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+- } else {
+- unsigned long flags;
+-
++ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+- smi_msg = smi_add_send_msg(intf, smi_msg, priority);
++ smi_msg = smi_add_send_msg(intf, smi_msg, priority);
++
++ smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, smi_msg);
++
++ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+- }
+
+ if (smi_msg)
+ handlers->sender(intf->send_info, smi_msg);
+@@ -1978,6 +2014,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
+ ipmb_seq, broadcast,
+ source_address, source_lun);
+
++ /* We will be getting a response in the BMC message queue. */
++ smi_msg->needs_response = true;
++
+ /*
+ * Copy the message into the recv message data, so we
+ * can retransmit it later if necessary.
+@@ -2165,6 +2204,7 @@ static int i_ipmi_request(struct ipmi_user *user,
+ goto out;
+ }
+ }
++ smi_msg->needs_response = false;
+
+ rcu_read_lock();
+ if (intf->in_shutdown) {
+@@ -3386,6 +3426,8 @@ int ipmi_add_smi(struct module *owner,
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
+ spin_lock_init(&intf->events_lock);
+ atomic_set(&intf->event_waiters, 0);
++ atomic_set(&intf->watchdog_waiters, 0);
++ atomic_set(&intf->command_waiters, 0);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ INIT_LIST_HEAD(&intf->waiting_events);
+ intf->waiting_events_count = 0;
+@@ -4404,6 +4446,9 @@ static void smi_recv_tasklet(unsigned long val)
+ intf->curr_msg = newmsg;
+ }
+ }
++
++ smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, newmsg);
++
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ if (newmsg)
+@@ -4531,7 +4576,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
+ struct list_head *timeouts,
+ unsigned long timeout_period,
+ int slot, unsigned long *flags,
+- unsigned int *waiting_msgs)
++ unsigned int *watch_mask)
+ {
+ struct ipmi_recv_msg *msg;
+
+@@ -4543,7 +4588,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
+
+ if (timeout_period < ent->timeout) {
+ ent->timeout -= timeout_period;
+- (*waiting_msgs)++;
++ *watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+ return;
+ }
+
+@@ -4562,7 +4607,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
+ struct ipmi_smi_msg *smi_msg;
+ /* More retries, send again. */
+
+- (*waiting_msgs)++;
++ *watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+
+ /*
+ * Start with the max timer, set to normal timer after
+@@ -4614,13 +4659,13 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
+ struct ipmi_recv_msg *msg, *msg2;
+ unsigned long flags;
+ int i;
+- unsigned int waiting_msgs = 0;
++ unsigned int watch_mask = 0;
+
+ if (!intf->bmc_registered) {
+ kref_get(&intf->refcount);
+ if (!schedule_work(&intf->bmc_reg_work)) {
+ kref_put(&intf->refcount, intf_free);
+- waiting_msgs++;
++ watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+ }
+ }
+
+@@ -4640,7 +4685,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+ check_msg_timeout(intf, &intf->seq_table[i],
+ &timeouts, timeout_period, i,
+- &flags, &waiting_msgs);
++ &flags, &watch_mask);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &timeouts, link)
+@@ -4671,7 +4716,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
+
+ tasklet_schedule(&intf->recv_tasklet);
+
+- return waiting_msgs;
++ return watch_mask;
+ }
+
+ static void ipmi_request_event(struct ipmi_smi *intf)
+@@ -4691,37 +4736,43 @@ static atomic_t stop_operation;
+ static void ipmi_timeout(struct timer_list *unused)
+ {
+ struct ipmi_smi *intf;
+- int nt = 0, index;
++ unsigned int watch_mask = 0;
++ int index;
++ unsigned long flags;
+
+ if (atomic_read(&stop_operation))
+ return;
+
+ index = srcu_read_lock(&ipmi_interfaces_srcu);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+- int lnt = 0;
+-
+ if (atomic_read(&intf->event_waiters)) {
+ intf->ticks_to_req_ev--;
+ if (intf->ticks_to_req_ev == 0) {
+ ipmi_request_event(intf);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ }
+- lnt++;
++ watch_mask |= IPMI_WATCH_MASK_INTERNAL;
+ }
+
+- lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
++ if (atomic_read(&intf->watchdog_waiters))
++ watch_mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+
+- lnt = !!lnt;
+- if (lnt != intf->last_needs_timer &&
+- intf->handlers->set_need_watch)
+- intf->handlers->set_need_watch(intf->send_info, lnt);
+- intf->last_needs_timer = lnt;
++ if (atomic_read(&intf->command_waiters))
++ watch_mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;
++
++ watch_mask |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+
+- nt += lnt;
++ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
++ if (watch_mask != intf->last_watch_mask &&
++ intf->handlers->set_need_watch)
++ intf->handlers->set_need_watch(intf->send_info,
++ watch_mask);
++ intf->last_watch_mask = watch_mask;
++ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ }
+ srcu_read_unlock(&ipmi_interfaces_srcu, index);
+
+- if (nt)
++ if (watch_mask)
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+ }
+
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index a5e1dce042e8e..429fe063e33ff 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1073,10 +1073,13 @@ static void request_events(void *send_info)
+ atomic_set(&smi_info->req_events, 1);
+ }
+
+-static void set_need_watch(void *send_info, bool enable)
++static void set_need_watch(void *send_info, unsigned int watch_mask)
+ {
+ struct smi_info *smi_info = send_info;
+ unsigned long flags;
++ int enable;
++
++ enable = !!(watch_mask & ~IPMI_WATCH_MASK_INTERNAL);
+
+ atomic_set(&smi_info->need_watch, enable);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 469da2290c2a0..e760501e50b20 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -91,8 +91,8 @@
+ /*
+ * Timeout for the watch, only used for get flag timer.
+ */
+-#define SSIF_WATCH_TIMEOUT_MSEC 100
+-#define SSIF_WATCH_TIMEOUT_JIFFIES msecs_to_jiffies(SSIF_WATCH_TIMEOUT_MSEC)
++#define SSIF_WATCH_MSG_TIMEOUT msecs_to_jiffies(10)
++#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
+
+ enum ssif_intf_state {
+ SSIF_NORMAL,
+@@ -274,7 +274,7 @@ struct ssif_info {
+ struct timer_list retry_timer;
+ int retries_left;
+
+- bool need_watch; /* Need to look for flags? */
++ long watch_timeout; /* Timeout for flags check, 0 if off. */
+ struct timer_list watch_timer; /* Flag fetch timer. */
+
+ /* Info from SSIF cmd */
+@@ -576,9 +576,9 @@ static void watch_timeout(struct timer_list *t)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+- if (ssif_info->need_watch) {
++ if (ssif_info->watch_timeout) {
+ mod_timer(&ssif_info->watch_timer,
+- jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
++ jiffies + ssif_info->watch_timeout);
+ if (SSIF_IDLE(ssif_info)) {
+ start_flag_fetch(ssif_info, flags); /* Releases lock */
+ return;
+@@ -1151,17 +1151,23 @@ static void request_events(void *send_info)
+ * Upper layer is changing the flag saying whether we need to request
+ * flags periodically or not.
+ */
+-static void ssif_set_need_watch(void *send_info, bool enable)
++static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
+ {
+ struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ unsigned long oflags, *flags;
++ long timeout = 0;
++
++ if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
++ timeout = SSIF_WATCH_MSG_TIMEOUT;
++ else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
++ timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+- if (enable != ssif_info->need_watch) {
+- ssif_info->need_watch = enable;
+- if (ssif_info->need_watch)
++ if (timeout != ssif_info->watch_timeout) {
++ ssif_info->watch_timeout = timeout;
++ if (ssif_info->watch_timeout)
+ mod_timer(&ssif_info->watch_timer,
+- jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
++ jiffies + ssif_info->watch_timeout);
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
+index 1995ce1467890..86b119400f301 100644
+--- a/include/linux/ipmi_smi.h
++++ b/include/linux/ipmi_smi.h
+@@ -30,6 +30,17 @@ struct device;
+ /* Structure for the low-level drivers. */
+ typedef struct ipmi_smi *ipmi_smi_t;
+
++/*
++ * Flags for set_check_watch() below. Tells if the SMI should be
++ * waiting for watchdog timeouts, commands and/or messages. There is
++ * also an internal flag for the message handler, SMIs should ignore
++ * it.
++ */
++#define IPMI_WATCH_MASK_INTERNAL (1 << 0)
++#define IPMI_WATCH_MASK_CHECK_MESSAGES (1 << 1)
++#define IPMI_WATCH_MASK_CHECK_WATCHDOG (1 << 2)
++#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 3)
++
+ /*
+ * Messages to/from the lower layer. The smi interface will take one
+ * of these to send. After the send has occurred and a response has
+@@ -55,8 +66,16 @@ struct ipmi_smi_msg {
+ int rsp_size;
+ unsigned char rsp[IPMI_MAX_MSG_LENGTH];
+
+- /* Will be called when the system is done with the message
+- (presumably to free it). */
++ /*
++ * There should be a response message coming back in the BMC
++ * message queue.
++ */
++ bool needs_response;
++
++ /*
++ * Will be called when the system is done with the message
++ * (presumably to free it).
++ */
+ void (*done)(struct ipmi_smi_msg *msg);
+ };
+
+@@ -105,12 +124,15 @@ struct ipmi_smi_handlers {
+
+ /*
+ * Called by the upper layer when some user requires that the
+- * interface watch for events, received messages, watchdog
+- * pretimeouts, or not. Used by the SMI to know if it should
+- * watch for these. This may be NULL if the SMI does not
+- * implement it.
++ * interface watch for received messages and watchdog
++ * pretimeouts (basically do a "Get Flags", or not. Used by
++ * the SMI to know if it should watch for these. This may be
++ * NULL if the SMI does not implement it. watch_mask is from
++ * IPMI_WATCH_MASK_xxx above. The interface should run slower
++ * timeouts for just watchdog checking or faster timeouts when
++ * waiting for the message queue.
+ */
+- void (*set_need_watch)(void *send_info, bool enable);
++ void (*set_need_watch)(void *send_info, unsigned int watch_mask);
+
+ /*
+ * Called when flushing all pending messages.
+--
+2.39.2
+
--- /dev/null
+From 1dd6f7845ebb67948e7849941991034978bd9a54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Oct 2018 15:30:57 -0500
+Subject: ipmi: Fix SSIF flag requests
+
+From: Corey Minyard <cminyard@mvista.com>
+
+[ Upstream commit a1466ec5b671651b848df17fc9233ecbb7d35f9f ]
+
+Commit 89986496de141 ("ipmi: Turn off all activity on an idle ipmi
+interface") modified the IPMI code to only request events when the
+driver had something waiting for events. The SSIF code, however,
+was using the event fetch request to also fetch the flags.
+
+Add a timer and the proper handling for the upper layer telling
+whether flags fetches are required.
+
+Reported-by: Kamlakant Patel <Kamlakant.Patel@cavium.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Kamlakant Patel <kamlakant.patel@cavium.com>
+Stable-dep-of: 6d2555cde291 ("ipmi: fix SSIF not responding under certain cond.")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 64 ++++++++++++++++++++++++++++-------
+ 1 file changed, 52 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index fd1a487443f02..469da2290c2a0 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -88,6 +88,12 @@
+ #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+ #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+
++/*
++ * Timeout for the watch, only used for get flag timer.
++ */
++#define SSIF_WATCH_TIMEOUT_MSEC 100
++#define SSIF_WATCH_TIMEOUT_JIFFIES msecs_to_jiffies(SSIF_WATCH_TIMEOUT_MSEC)
++
+ enum ssif_intf_state {
+ SSIF_NORMAL,
+ SSIF_GETTING_FLAGS,
+@@ -268,6 +274,9 @@ struct ssif_info {
+ struct timer_list retry_timer;
+ int retries_left;
+
++ bool need_watch; /* Need to look for flags? */
++ struct timer_list watch_timer; /* Flag fetch timer. */
++
+ /* Info from SSIF cmd */
+ unsigned char max_xmit_msg_size;
+ unsigned char max_recv_msg_size;
+@@ -558,6 +567,26 @@ static void retry_timeout(struct timer_list *t)
+ start_get(ssif_info);
+ }
+
++static void watch_timeout(struct timer_list *t)
++{
++ struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
++ unsigned long oflags, *flags;
++
++ if (ssif_info->stopping)
++ return;
++
++ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
++ if (ssif_info->need_watch) {
++ mod_timer(&ssif_info->watch_timer,
++ jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
++ if (SSIF_IDLE(ssif_info)) {
++ start_flag_fetch(ssif_info, flags); /* Releases lock */
++ return;
++ }
++ ssif_info->req_flags = true;
++ }
++ ipmi_ssif_unlock_cond(ssif_info, flags);
++}
+
+ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ unsigned int data)
+@@ -1103,8 +1132,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+ }
+
+ /*
+- * Instead of having our own timer to periodically check the message
+- * flags, we let the message handler drive us.
++ * Upper layer wants us to request events.
+ */
+ static void request_events(void *send_info)
+ {
+@@ -1115,18 +1143,27 @@ static void request_events(void *send_info)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+- /*
+- * Request flags first, not events, because the lower layer
+- * doesn't have a way to send an attention. But make sure
+- * event checking still happens.
+- */
+ ssif_info->req_events = true;
+- if (SSIF_IDLE(ssif_info))
+- start_flag_fetch(ssif_info, flags);
+- else {
+- ssif_info->req_flags = true;
+- ipmi_ssif_unlock_cond(ssif_info, flags);
++ ipmi_ssif_unlock_cond(ssif_info, flags);
++}
++
++/*
++ * Upper layer is changing the flag saying whether we need to request
++ * flags periodically or not.
++ */
++static void ssif_set_need_watch(void *send_info, bool enable)
++{
++ struct ssif_info *ssif_info = (struct ssif_info *) send_info;
++ unsigned long oflags, *flags;
++
++ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
++ if (enable != ssif_info->need_watch) {
++ ssif_info->need_watch = enable;
++ if (ssif_info->need_watch)
++ mod_timer(&ssif_info->watch_timer,
++ jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
+ }
++ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+
+ static int ssif_start_processing(void *send_info,
+@@ -1253,6 +1290,7 @@ static void shutdown_ssif(void *send_info)
+ schedule_timeout(1);
+
+ ssif_info->stopping = true;
++ del_timer_sync(&ssif_info->watch_timer);
+ del_timer_sync(&ssif_info->retry_timer);
+ if (ssif_info->thread) {
+ complete(&ssif_info->wake_thread);
+@@ -1632,6 +1670,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ spin_lock_init(&ssif_info->lock);
+ ssif_info->ssif_state = SSIF_NORMAL;
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
++ timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
+
+ for (i = 0; i < SSIF_NUM_STATS; i++)
+ atomic_set(&ssif_info->stats[i], 0);
+@@ -1645,6 +1684,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ ssif_info->handlers.get_smi_info = get_smi_info;
+ ssif_info->handlers.sender = sender;
+ ssif_info->handlers.request_events = request_events;
++ ssif_info->handlers.set_need_watch = ssif_set_need_watch;
+
+ {
+ unsigned int thread_num;
+--
+2.39.2
+
--- /dev/null
+From cdffaee1ed656f4779efa4a3a5e655bb1dbebdc5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Apr 2023 15:49:07 +0800
+Subject: ipmi: fix SSIF not responding under certain cond.
+
+From: Zhang Yuchen <zhangyuchen.lcr@bytedance.com>
+
+[ Upstream commit 6d2555cde2918409b0331560e66f84a0ad4849c6 ]
+
+On our server, IPMI communication is not restored after the BMC is
+upgraded to a specific firmware version.
+The ipmi driver does not respond after printing the following log:
+
+ ipmi_ssif: Invalid response getting flags: 1c 1
+
+I found that after entering this branch, ssif_info->ssif_state always
+holds SSIF_GETTING_FLAGS and never returns to IDLE.
+
+As a result, the driver cannot be unloaded, because the driver state is
+checked during the unload process and must be IDLE in shutdown_ssif():
+
+ while (ssif_info->ssif_state != SSIF_IDLE)
+ schedule_timeout(1);
+
+The process that triggers this problem is:
+
+1. One msg times out, the next msg starts to send, and
+ssif_set_need_watch() is called.
+
+2. ssif_set_need_watch()->watch_timeout()->start_flag_fetch() changes
+ssif_state to SSIF_GETTING_FLAGS.
+
+3. In msg_done_handler(), with ssif_state == SSIF_GETTING_FLAGS, if an
+error message is received the second branch does not modify ssif_state.
+
+4. All retry actions need IS_SSIF_IDLE() == true, including the retry
+actions in watch_timeout() and msg_done_handler(). Sending a msg does
+not work either, since IS_SSIF_IDLE() is also checked in
+start_next_msg().
+
+5. The only thing that can still be triggered in the SSIF driver is
+watch_timeout(), and after destroy_user() this timer stops too.
+
+So, once this branch is entered, ssif_state remains SSIF_GETTING_FLAGS:
+no msg can be sent, no timer is started, and the driver cannot be
+unloaded.
+
+We did a comparative test before and after applying this patch, and the
+fix is effective.
+
+Fixes: 259307074bfc ("ipmi: Add SMBus interface driver (SSIF)")
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Zhang Yuchen <zhangyuchen.lcr@bytedance.com>
+Message-Id: <20230412074907.80046-1-zhangyuchen.lcr@bytedance.com>
+Signed-off-by: Corey Minyard <minyard@acm.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 566be60fa1377..34c5b287c4125 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -801,9 +801,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ /*
+- * Don't abort here, maybe it was a queued
+- * response to a previous command.
++ * Recv error response, give up.
+ */
++ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ pr_warn(PFX "Invalid response getting flags: %x %x\n",
+ data[0], data[1]);
+--
+2.39.2
+
--- /dev/null
+From 875da679e65079e940d36659927ee60cef840017 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jan 2023 10:13:13 -0600
+Subject: ipmi_ssif: Rename idle state and check
+
+From: Corey Minyard <cminyard@mvista.com>
+
+[ Upstream commit 8230831c43a328c2be6d28c65d3f77e14c59986b ]
+
+Rename the SSIF_IDLE() to IS_SSIF_IDLE(), since that is more clear, and
+rename SSIF_NORMAL to SSIF_IDLE, since that's more accurate.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Stable-dep-of: 6d2555cde291 ("ipmi: fix SSIF not responding under certain cond.")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/ipmi/ipmi_ssif.c | 46 +++++++++++++++++------------------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index e760501e50b20..566be60fa1377 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -95,7 +95,7 @@
+ #define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
+
+ enum ssif_intf_state {
+- SSIF_NORMAL,
++ SSIF_IDLE,
+ SSIF_GETTING_FLAGS,
+ SSIF_GETTING_EVENTS,
+ SSIF_CLEARING_FLAGS,
+@@ -103,8 +103,8 @@ enum ssif_intf_state {
+ /* FIXME - add watchdog stuff. */
+ };
+
+-#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
+- && (ssif)->curr_msg == NULL)
++#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
++ && (ssif)->curr_msg == NULL)
+
+ /*
+ * Indexes into stats[] in ssif_info below.
+@@ -349,9 +349,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
+
+ /*
+ * Must be called with the message lock held. This will release the
+- * message lock. Note that the caller will check SSIF_IDLE and start a
+- * new operation, so there is no need to check for new messages to
+- * start in here.
++ * message lock. Note that the caller will check IS_SSIF_IDLE and
++ * start a new operation, so there is no need to check for new
++ * messages to start in here.
+ */
+ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ {
+@@ -368,7 +368,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+
+ if (start_send(ssif_info, msg, 3) != 0) {
+ /* Error, just go to normal state. */
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ }
+ }
+
+@@ -383,7 +383,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+ mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+ if (start_send(ssif_info, mb, 2) != 0)
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ }
+
+ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+@@ -394,7 +394,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->curr_msg = NULL;
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ ipmi_free_smi_msg(msg);
+ }
+@@ -408,7 +408,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+@@ -431,7 +431,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+@@ -449,9 +449,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+
+ /*
+ * Must be called with the message lock held. This will release the
+- * message lock. Note that the caller will check SSIF_IDLE and start a
+- * new operation, so there is no need to check for new messages to
+- * start in here.
++ * message lock. Note that the caller will check IS_SSIF_IDLE and
++ * start a new operation, so there is no need to check for new
++ * messages to start in here.
+ */
+ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ {
+@@ -467,7 +467,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ /* Events available. */
+ start_event_fetch(ssif_info, flags);
+ else {
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+ }
+@@ -579,7 +579,7 @@ static void watch_timeout(struct timer_list *t)
+ if (ssif_info->watch_timeout) {
+ mod_timer(&ssif_info->watch_timer,
+ jiffies + ssif_info->watch_timeout);
+- if (SSIF_IDLE(ssif_info)) {
++ if (IS_SSIF_IDLE(ssif_info)) {
+ start_flag_fetch(ssif_info, flags); /* Releases lock */
+ return;
+ }
+@@ -776,7 +776,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ }
+
+ switch (ssif_info->ssif_state) {
+- case SSIF_NORMAL:
++ case SSIF_IDLE:
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (!msg)
+ break;
+@@ -794,7 +794,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ * Error fetching flags, or invalid length,
+ * just give up for now.
+ */
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ pr_warn(PFX "Error getting flags: %d %d, %x\n",
+ result, len, (len >= 3) ? data[2] : 0);
+@@ -825,7 +825,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+ data[0], data[1]);
+ }
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+
+@@ -901,7 +901,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ }
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+- if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
++ if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (ssif_info->req_events)
+ start_event_fetch(ssif_info, flags);
+ else if (ssif_info->req_flags)
+@@ -1070,7 +1070,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+ unsigned long oflags;
+
+ restart:
+- if (!SSIF_IDLE(ssif_info)) {
++ if (!IS_SSIF_IDLE(ssif_info)) {
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+@@ -1292,7 +1292,7 @@ static void shutdown_ssif(void *send_info)
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
+
+ /* make sure the driver is not looking for flags any more. */
+- while (ssif_info->ssif_state != SSIF_NORMAL)
++ while (ssif_info->ssif_state != SSIF_IDLE)
+ schedule_timeout(1);
+
+ ssif_info->stopping = true;
+@@ -1674,7 +1674,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ }
+
+ spin_lock_init(&ssif_info->lock);
+- ssif_info->ssif_state = SSIF_NORMAL;
++ ssif_info->ssif_state = SSIF_IDLE;
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
+ timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
+
+--
+2.39.2
+
--- /dev/null
+From fd06e23c473539b205b27e06414464e8e23b5342 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jun 2020 16:51:30 -0700
+Subject: kernel/relay.c: fix read_pos error when multiple readers
+
+From: Pengcheng Yang <yangpc@wangsu.com>
+
+[ Upstream commit 341a7213e5c1ce274cc0f02270054905800ea660 ]
+
+When reading, read_pos should start from bytes_consumed, not file->f_pos,
+because when there is more than one reader, the data at the read_pos
+corresponding to file->f_pos may already have been consumed. This causes
+already-consumed data to be read again and bytes_consumed to be updated
+incorrectly.
+
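+As a hypothetical illustration (the numbers are made up for the
+example): if another reader has already consumed the first 100 bytes of
+the current subbuf (bytes_consumed = 100, subbufs_consumed = 0), a
+reader whose file->f_pos is still 0 would start at offset 0, re-read
+those 100 bytes, and then update bytes_consumed incorrectly. Starting
+from
+
+    read_pos = consumed * subbuf_size + bytes_consumed = 100
+
+instead keeps every reader at the first unconsumed byte.
+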
+Signed-off-by: Pengcheng Yang <yangpc@wangsu.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Jens Axboe <axboe@kernel.dk>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Link: http://lkml.kernel.org/r/1579691175-28949-1-git-send-email-yangpc@wangsu.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Stable-dep-of: 43ec16f1450f ("relayfs: fix out-of-bounds access in relay_file_read")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/relay.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+diff --git a/kernel/relay.c b/kernel/relay.c
+index b7aa7df43955b..0f027e04b0094 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -997,14 +997,14 @@ static void relay_file_read_consume(struct rchan_buf *buf,
+ /*
+ * relay_file_read_avail - boolean, are there unconsumed bytes available?
+ */
+-static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
++static int relay_file_read_avail(struct rchan_buf *buf)
+ {
+ size_t subbuf_size = buf->chan->subbuf_size;
+ size_t n_subbufs = buf->chan->n_subbufs;
+ size_t produced = buf->subbufs_produced;
+ size_t consumed = buf->subbufs_consumed;
+
+- relay_file_read_consume(buf, read_pos, 0);
++ relay_file_read_consume(buf, 0, 0);
+
+ consumed = buf->subbufs_consumed;
+
+@@ -1065,23 +1065,20 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
+
+ /**
+ * relay_file_read_start_pos - find the first available byte to read
+- * @read_pos: file read position
+ * @buf: relay channel buffer
+ *
+- * If the @read_pos is in the middle of padding, return the
++ * If the read_pos is in the middle of padding, return the
+ * position of the first actually available byte, otherwise
+ * return the original value.
+ */
+-static size_t relay_file_read_start_pos(size_t read_pos,
+- struct rchan_buf *buf)
++static size_t relay_file_read_start_pos(struct rchan_buf *buf)
+ {
+ size_t read_subbuf, padding, padding_start, padding_end;
+ size_t subbuf_size = buf->chan->subbuf_size;
+ size_t n_subbufs = buf->chan->n_subbufs;
+ size_t consumed = buf->subbufs_consumed % n_subbufs;
++ size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
+
+- if (!read_pos)
+- read_pos = consumed * subbuf_size + buf->bytes_consumed;
+ read_subbuf = read_pos / subbuf_size;
+ padding = buf->padding[read_subbuf];
+ padding_start = (read_subbuf + 1) * subbuf_size - padding;
+@@ -1137,10 +1134,10 @@ static ssize_t relay_file_read(struct file *filp,
+ do {
+ void *from;
+
+- if (!relay_file_read_avail(buf, *ppos))
++ if (!relay_file_read_avail(buf))
+ break;
+
+- read_start = relay_file_read_start_pos(*ppos, buf);
++ read_start = relay_file_read_start_pos(buf);
+ avail = relay_file_read_subbuf_avail(read_start, buf);
+ if (!avail)
+ break;
+--
+2.39.2
+
--- /dev/null
+From ea13a42da5f4630899734d5fa2d63a227104f9df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jul 2019 15:22:59 +0200
+Subject: nohz: Add TICK_DEP_BIT_RCU
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+[ Upstream commit 01b4c39901e087ceebae2733857248de81476bd8 ]
+
+If a nohz_full CPU is looping in the kernel, the scheduling-clock tick
+might nevertheless remain disabled. In !PREEMPT kernels, this can
+prevent RCU's attempts to enlist the aid of that CPU's executions of
+cond_resched(), which can in turn result in an arbitrarily delayed grace
+period and thus an OOM. RCU therefore needs a way to enable a holdout
+nohz_full CPU's scheduler-clock interrupt.
+
+This commit therefore provides a new TICK_DEP_BIT_RCU value which RCU can
+pass to tick_dep_set_cpu() and friends to force on the scheduler-clock
+interrupt for a specified CPU or task. In some cases, rcutorture needs
+to turn on the scheduler-clock tick, so this commit also exports the
+relevant symbols to GPL-licensed modules.
+
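+As a rough usage sketch (the real call sites live in the RCU and
+rcutorture code, not in this patch), a holdout CPU's scheduler-clock
+tick can be forced on and later released via the new bit:
+
+    /* Force the tick on for a CPU that is holding up a grace period. */
+    tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU);
+    ...
+    /* Drop the dependency once the tick is no longer needed. */
+    tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU);
+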
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Stable-dep-of: 58d766824264 ("tick/nohz: Fix cpu_is_hotpluggable() by checking with nohz subsystem")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/tick.h | 7 ++++++-
+ include/trace/events/timer.h | 3 ++-
+ kernel/time/tick-sched.c | 7 +++++++
+ 3 files changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/tick.h b/include/linux/tick.h
+index 55388ab45fd4d..965163bdfe412 100644
+--- a/include/linux/tick.h
++++ b/include/linux/tick.h
+@@ -102,7 +102,8 @@ enum tick_dep_bits {
+ TICK_DEP_BIT_POSIX_TIMER = 0,
+ TICK_DEP_BIT_PERF_EVENTS = 1,
+ TICK_DEP_BIT_SCHED = 2,
+- TICK_DEP_BIT_CLOCK_UNSTABLE = 3
++ TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
++ TICK_DEP_BIT_RCU = 4
+ };
+
+ #define TICK_DEP_MASK_NONE 0
+@@ -110,6 +111,7 @@ enum tick_dep_bits {
+ #define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
+ #define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
+ #define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
++#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU)
+
+ #ifdef CONFIG_NO_HZ_COMMON
+ extern bool tick_nohz_enabled;
+@@ -257,6 +259,9 @@ static inline bool tick_nohz_full_enabled(void) { return false; }
+ static inline bool tick_nohz_full_cpu(int cpu) { return false; }
+ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+
++static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
++static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
++
+ static inline void tick_dep_set(enum tick_dep_bits bit) { }
+ static inline void tick_dep_clear(enum tick_dep_bits bit) { }
+ static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index a57e4ee989d62..350b046e7576c 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -362,7 +362,8 @@ TRACE_EVENT(itimer_expire,
+ tick_dep_name(POSIX_TIMER) \
+ tick_dep_name(PERF_EVENTS) \
+ tick_dep_name(SCHED) \
+- tick_dep_name_end(CLOCK_UNSTABLE)
++ tick_dep_name(CLOCK_UNSTABLE) \
++ tick_dep_name_end(RCU)
+
+ #undef tick_dep_name
+ #undef tick_dep_mask_name
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 48403fb653c2f..7228bdd2eabe2 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -199,6 +199,11 @@ static bool check_tick_dependency(atomic_t *dep)
+ return true;
+ }
+
++ if (val & TICK_DEP_MASK_RCU) {
++ trace_tick_stop(0, TICK_DEP_MASK_RCU);
++ return true;
++ }
++
+ return false;
+ }
+
+@@ -325,6 +330,7 @@ void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
+ preempt_enable();
+ }
+ }
++EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
+
+ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
+ {
+@@ -332,6 +338,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
+
+ atomic_andnot(BIT(bit), &ts->tick_dep_mask);
+ }
++EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
+
+ /*
+ * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
+--
+2.39.2
+
--- /dev/null
+From bb9aa80bf92e6c4d2a09f5c56fe61b32ec355be3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Apr 2023 12:02:03 +0800
+Subject: relayfs: fix out-of-bounds access in relay_file_read
+
+From: Zhang Zhengming <zhang.zhengming@h3c.com>
+
+[ Upstream commit 43ec16f1450f4936025a9bdf1a273affdb9732c1 ]
+
+There is a crash in relay_file_read(), as the variable 'from'
+points to the end of the last subbuf.
+
+The oops looks something like:
+pc : __arch_copy_to_user+0x180/0x310
+lr : relay_file_read+0x20c/0x2c8
+Call trace:
+ __arch_copy_to_user+0x180/0x310
+ full_proxy_read+0x68/0x98
+ vfs_read+0xb0/0x1d0
+ ksys_read+0x6c/0xf0
+ __arm64_sys_read+0x20/0x28
+ el0_svc_common.constprop.3+0x84/0x108
+ do_el0_svc+0x74/0x90
+ el0_svc+0x1c/0x28
+ el0_sync_handler+0x88/0xb0
+ el0_sync+0x148/0x180
+
+We get the conditions by analyzing the vmcore:
+
+1). The last produced byte and the last consumed byte
+ are both at the end of the last subbuf.
+
+2). A softirq calls a function (e.g. __blk_add_trace)
+ to write to the relay buffer while a program is calling
+ relay_file_read_avail().
+
+ relay_file_read
+ relay_file_read_avail
+ relay_file_read_consume(buf, 0, 0);
+ //interrupted by a softirq that will write to the subbuf
+ ....
+ return 1;
+ //read_start points to the end of the last subbuf
+ read_start = relay_file_read_start_pos
+ //avail is equal to subbuf_size
+ avail = relay_file_read_subbuf_avail
+ //from points to an invalid memory address
+ from = buf->start + read_start
+ //the system crashes
+ copy_to_user(buffer, from, avail)
+
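+As a hypothetical worked example (sizes made up for illustration): with
+n_subbufs = 4 and subbuf_size = 4096, if consumed = 3 and bytes_consumed
+has reached 4096 (the end of the last subbuf), then
+
+    read_start = 3 * 4096 + 4096 = 16384 = n_subbufs * subbuf_size
+
+which points just past the end of the buffer, so from = buf->start +
+read_start is out of bounds. The fix below wraps read_pos modulo
+n_subbufs * subbuf_size, bringing it back to offset 0.
+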
+Link: https://lkml.kernel.org/r/20230419040203.37676-1-zhang.zhengming@h3c.com
+Fixes: 8d62fdebdaf9 ("relay file read: start-pos fix")
+Signed-off-by: Zhang Zhengming <zhang.zhengming@h3c.com>
+Reviewed-by: Zhao Lei <zhao_lei1@hoperun.com>
+Reviewed-by: Zhou Kete <zhou.kete@h3c.com>
+Reviewed-by: Pengcheng Yang <yangpc@wangsu.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/relay.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/relay.c b/kernel/relay.c
+index 0f027e04b0094..e6f70f4c41a36 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -1077,7 +1077,8 @@ static size_t relay_file_read_start_pos(struct rchan_buf *buf)
+ size_t subbuf_size = buf->chan->subbuf_size;
+ size_t n_subbufs = buf->chan->n_subbufs;
+ size_t consumed = buf->subbufs_consumed % n_subbufs;
+- size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
++ size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
++ % (n_subbufs * subbuf_size);
+
+ read_subbuf = read_pos / subbuf_size;
+ padding = buf->padding[read_subbuf];
+--
+2.39.2
+
perf-auxtrace-fix-address-filter-entire-kernel-size.patch
debugobject-ensure-pool-refill-again.patch
netfilter-nf_tables-deactivate-anonymous-set-from-preparation-phase.patch
+nohz-add-tick_dep_bit_rcu.patch
+tick-nohz-fix-cpu_is_hotpluggable-by-checking-with-n.patch
+ipmi-fix-ssif-flag-requests.patch
+ipmi-fix-how-the-lower-layers-are-told-to-watch-for-.patch
+ipmi_ssif-rename-idle-state-and-check.patch
+ipmi-fix-ssif-not-responding-under-certain-cond.patch
+dm-verity-skip-redundant-verity_handle_err-on-i-o-er.patch
+dm-verity-fix-error-handling-for-check_at_most_once-.patch
+kernel-relay.c-fix-read_pos-error-when-multiple-read.patch
+relayfs-fix-out-of-bounds-access-in-relay_file_read.patch
--- /dev/null
+From f8505f14404f6760e01e81b0394fcee4d60f8f0b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 17:31:26 +0000
+Subject: tick/nohz: Fix cpu_is_hotpluggable() by checking with nohz subsystem
+
+From: Joel Fernandes (Google) <joel@joelfernandes.org>
+
+[ Upstream commit 58d7668242647e661a20efe065519abd6454287e ]
+
+For CONFIG_NO_HZ_FULL systems, the tick_do_timer_cpu cannot be offlined.
+However, cpu_is_hotpluggable() still returns true for those CPUs. This causes
+torture tests that do offlining to end up trying to offline this CPU causing
+test failures. Such failure happens on all architectures.
+
+Fix the repeated error messages thrown by this (even if the hotplug errors are
+harmless) by asking the opinion of the nohz subsystem on whether the CPU can be
+hotplugged.
+
+[ Apply Frederic Weisbecker feedback on refactoring tick_nohz_cpu_down(). ]
+
+For drivers/base/ portion:
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Frederic Weisbecker <frederic@kernel.org>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: "Paul E. McKenney" <paulmck@kernel.org>
+Cc: Zhouyi Zhou <zhouzhouyi@gmail.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: rcu <rcu@vger.kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: 2987557f52b9 ("driver-core/cpu: Expose hotpluggability to the rest of the kernel")
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/cpu.c | 3 ++-
+ include/linux/tick.h | 2 ++
+ kernel/time/tick-sched.c | 11 ++++++++---
+ 3 files changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index ce5b3ffbd6eef..878ed43d87539 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -494,7 +494,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
+ bool cpu_is_hotpluggable(unsigned cpu)
+ {
+ struct device *dev = get_cpu_device(cpu);
+- return dev && container_of(dev, struct cpu, dev)->hotpluggable;
++ return dev && container_of(dev, struct cpu, dev)->hotpluggable
++ && tick_nohz_cpu_hotpluggable(cpu);
+ }
+ EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
+
+diff --git a/include/linux/tick.h b/include/linux/tick.h
+index 965163bdfe412..443726085f6c1 100644
+--- a/include/linux/tick.h
++++ b/include/linux/tick.h
+@@ -197,6 +197,7 @@ extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit);
+ extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
+ enum tick_dep_bits bit);
++extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
+
+ /*
+ * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
+@@ -261,6 +262,7 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+
+ static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+ static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
++static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
+
+ static inline void tick_dep_set(enum tick_dep_bits bit) { }
+ static inline void tick_dep_clear(enum tick_dep_bits bit) { }
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 7228bdd2eabe2..25c6efa2c5577 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -406,7 +406,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
+ tick_nohz_full_running = true;
+ }
+
+-static int tick_nohz_cpu_down(unsigned int cpu)
++bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
+ {
+ /*
+ * The boot CPU handles housekeeping duty (unbound timers,
+@@ -414,8 +414,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
+ * CPUs. It must remain online when nohz full is enabled.
+ */
+ if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
+- return -EBUSY;
+- return 0;
++ return false;
++ return true;
++}
++
++static int tick_nohz_cpu_down(unsigned int cpu)
++{
++ return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
+ }
+
+ void __init tick_nohz_init(void)
+--
+2.39.2
+