--- /dev/null
+From b06c00715f57bfdb88e4123057538fbbdb9d7cdd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 20:12:22 +0800
+Subject: bcache: Fix __bch_btree_node_alloc to make the failure behavior
+ consistent
+
+From: Zheng Wang <zyytlz.wz@163.com>
+
+[ Upstream commit 80fca8a10b604afad6c14213fdfd816c4eda3ee4 ]
+
+In some specific situations, the return value of __bch_btree_node_alloc
+may be NULL. This may lead to a potential NULL pointer dereference in
+a caller function, for example via the calling chain:
+btree_split->bch_btree_node_alloc->__bch_btree_node_alloc.
+
+Fix it by initializing the return value in __bch_btree_node_alloc.
+
+Fixes: cafe56359144 ("bcache: A block layer cache")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zheng Wang <zyytlz.wz@163.com>
+Signed-off-by: Coly Li <colyli@suse.de>
+Link: https://lore.kernel.org/r/20230615121223.22502-6-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/bcache/btree.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 40eea56b9c900..71d670934a07e 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1125,10 +1125,12 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+ struct btree *parent)
+ {
+ BKEY_PADDED(key) k;
+- struct btree *b = ERR_PTR(-EAGAIN);
++ struct btree *b;
+
+ mutex_lock(&c->bucket_lock);
+ retry:
++ /* return ERR_PTR(-EAGAIN) when it fails */
++ b = ERR_PTR(-EAGAIN);
+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
+ goto err;
+
+--
+2.39.2
+
--- /dev/null
+From 75d6f130860e662dce825cb29bcff5de15287e91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Oct 2020 14:50:45 +0800
+Subject: bcache: remove 'int n' from parameter list of bch_bucket_alloc_set()
+
+From: Coly Li <colyli@suse.de>
+
+[ Upstream commit 17e4aed8309ff28670271546c2c3263eb12f5eb6 ]
+
+The parameter 'int n' from bch_bucket_alloc_set() is not clearly
+defined. From the code comments n is the number of buckets to alloc, but
+from the code itself 'n' is the maximum cache to iterate. Indeed all the
+locations where bch_bucket_alloc_set() is called, 'n' is always 1.
+
+This patch removes the confusing and unnecessary 'int n' from the parameter
+list of bch_bucket_alloc_set(), and explicitly allocates only 1 bucket
+for its caller.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 80fca8a10b60 ("bcache: Fix __bch_btree_node_alloc to make the failure behavior consistent")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/bcache/alloc.c | 35 +++++++++++++++--------------------
+ drivers/md/bcache/bcache.h | 4 ++--
+ drivers/md/bcache/btree.c | 2 +-
+ drivers/md/bcache/super.c | 2 +-
+ 4 files changed, 19 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index a1df0d95151c6..5310e1f4a2826 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -49,7 +49,7 @@
+ *
+ * bch_bucket_alloc() allocates a single bucket from a specific cache.
+ *
+- * bch_bucket_alloc_set() allocates one or more buckets from different caches
++ * bch_bucket_alloc_set() allocates one bucket from different caches
+ * out of a cache set.
+ *
+ * free_some_buckets() drives all the processes described above. It's called
+@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
+ }
+
+ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait)
++ struct bkey *k, bool wait)
+ {
+- int i;
++ struct cache *ca;
++ long b;
+
+ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+ return -1;
+
+ lockdep_assert_held(&c->bucket_lock);
+- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
+
+ bkey_init(k);
+
+- /* sort by free space/prio of oldest data in caches */
+-
+- for (i = 0; i < n; i++) {
+- struct cache *ca = c->cache_by_alloc[i];
+- long b = bch_bucket_alloc(ca, reserve, wait);
++ ca = c->cache_by_alloc[0];
++ b = bch_bucket_alloc(ca, reserve, wait);
++ if (b == -1)
++ goto err;
+
+- if (b == -1)
+- goto err;
++ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
++ bucket_to_sector(c, b),
++ ca->sb.nr_this_dev);
+
+- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
+- bucket_to_sector(c, b),
+- ca->sb.nr_this_dev);
+-
+- SET_KEY_PTRS(k, i + 1);
+- }
++ SET_KEY_PTRS(k, 1);
+
+ return 0;
+ err:
+@@ -525,12 +520,12 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ }
+
+ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait)
++ struct bkey *k, bool wait)
+ {
+ int ret;
+
+ mutex_lock(&c->bucket_lock);
+- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
++ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
+ mutex_unlock(&c->bucket_lock);
+ return ret;
+ }
+@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
+
+ spin_unlock(&c->data_bucket_lock);
+
+- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
++ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 6a380ed4919a0..e81d783109847 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -952,9 +952,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
+
+ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
+ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait);
++ struct bkey *k, bool wait);
+ int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+- struct bkey *k, int n, bool wait);
++ struct bkey *k, bool wait);
+ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
+ unsigned int sectors, unsigned int write_point,
+ unsigned int write_prio, bool wait);
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index ca0c6592a425e..40eea56b9c900 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1129,7 +1129,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+
+ mutex_lock(&c->bucket_lock);
+ retry:
+- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
++ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
+ goto err;
+
+ bkey_put(c, &k.key);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 43bedd3795fc8..4b076f7f184be 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -423,7 +423,7 @@ static int __uuid_write(struct cache_set *c)
+ closure_init_stack(&cl);
+ lockdep_assert_held(&bch_register_lock);
+
+- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
++ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
+ return 1;
+
+ SET_KEY_SIZE(&k.key, c->sb.bucket_size);
+--
+2.39.2
+
--- /dev/null
+From 941d9f22890e6eb81644a08c42752e0c49f92837 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Oct 2018 20:41:19 +0800
+Subject: bcache: use MAX_CACHES_PER_SET instead of magic number 8 in
+ __bch_bucket_alloc_set
+
+From: Shenghui Wang <shhuiw@foxmail.com>
+
+[ Upstream commit 8792099f9ad487cf381f4e8199ff2158ba0f6eb5 ]
+
+Current cache_set has MAX_CACHES_PER_SET caches most, and the macro
+is used for
+"
+ struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
+"
+in the define of struct cache_set.
+
+Use MAX_CACHES_PER_SET instead of magic number 8 in
+__bch_bucket_alloc_set.
+
+Signed-off-by: Shenghui Wang <shhuiw@foxmail.com>
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 80fca8a10b60 ("bcache: Fix __bch_btree_node_alloc to make the failure behavior consistent")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/bcache/alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 46794cac167e7..a1df0d95151c6 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -497,7 +497,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ return -1;
+
+ lockdep_assert_held(&c->bucket_lock);
+- BUG_ON(!n || n > c->caches_loaded || n > 8);
++ BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
+
+ bkey_init(k);
+
+--
+2.39.2
+
--- /dev/null
+From f8efa6348227e1871a5524dbf5e438effd121ff6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jun 2023 11:27:38 +0100
+Subject: btrfs: fix extent buffer leak after tree mod log failure at
+ split_node()
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ede600e497b1461d06d22a7d17703d9096868bc3 ]
+
+At split_node(), if we fail to log the tree mod log copy operation, we
+return without unlocking the split extent buffer we just allocated and
+without decrementing the reference we own on it. Fix this by unlocking
+it and decrementing the ref count before returning.
+
+Fixes: 5de865eebb83 ("Btrfs: fix tree mod logging")
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 34d56f0fa7501..e48c6d7a860f4 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -3590,6 +3590,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+
+ ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
+ if (ret) {
++ btrfs_tree_unlock(split);
++ free_extent_buffer(split);
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+--
+2.39.2
+
--- /dev/null
+From 0367393b2b1f89fc3b8cea798f06f4576c1a7867 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:32 -0400
+Subject: dlm: cleanup plock_op vs plock_xop
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit bcbb4ba6c9ba81e6975b642a2cade68044cd8a66 ]
+
+Lately the different casting between plock_op and plock_xop and list
+holders which was involved showed some issues which were hard to see.
+This patch removes the "plock_xop" structure and introduces a
+"struct plock_async_data". This structure will be set in "struct plock_op"
+in case of asynchronous lock handling as the original "plock_xop" was
+made for. There is no need anymore to cast pointers around for
+additional fields in case of asynchronous lock handling. As a disadvantage,
+another allocation was introduced, but it is only needed in the asynchronous
+case, which is currently only used in combination with nfs lockd.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 59e45c758ca1 ("fs: dlm: interrupt posix locks only when process is killed")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 77 ++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 46 insertions(+), 31 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 505cfe669762e..7912ef3706e26 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -22,20 +22,20 @@ static struct list_head recv_list;
+ static wait_queue_head_t send_wq;
+ static wait_queue_head_t recv_wq;
+
+-struct plock_op {
+- struct list_head list;
+- int done;
+- struct dlm_plock_info info;
+- int (*callback)(struct file_lock *fl, int result);
+-};
+-
+-struct plock_xop {
+- struct plock_op xop;
++struct plock_async_data {
+ void *fl;
+ void *file;
+ struct file_lock flc;
++ int (*callback)(struct file_lock *fl, int result);
+ };
+
++struct plock_op {
++ struct list_head list;
++ int done;
++ struct dlm_plock_info info;
++ /* if set indicates async handling */
++ struct plock_async_data *data;
++};
+
+ static inline void set_version(struct dlm_plock_info *info)
+ {
+@@ -61,6 +61,12 @@ static int check_version(struct dlm_plock_info *info)
+ return 0;
+ }
+
++static void dlm_release_plock_op(struct plock_op *op)
++{
++ kfree(op->data);
++ kfree(op);
++}
++
+ static void send_op(struct plock_op *op)
+ {
+ set_version(&op->info);
+@@ -104,22 +110,21 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
+ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ int cmd, struct file_lock *fl)
+ {
++ struct plock_async_data *op_data;
+ struct dlm_ls *ls;
+ struct plock_op *op;
+- struct plock_xop *xop;
+ int rv;
+
+ ls = dlm_find_lockspace_local(lockspace);
+ if (!ls)
+ return -EINVAL;
+
+- xop = kzalloc(sizeof(*xop), GFP_NOFS);
+- if (!xop) {
++ op = kzalloc(sizeof(*op), GFP_NOFS);
++ if (!op) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+- op = &xop->xop;
+ op->info.optype = DLM_PLOCK_OP_LOCK;
+ op->info.pid = fl->fl_pid;
+ op->info.ex = (fl->fl_type == F_WRLCK);
+@@ -128,22 +133,32 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ op->info.number = number;
+ op->info.start = fl->fl_start;
+ op->info.end = fl->fl_end;
++ /* async handling */
+ if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
++ op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
++ if (!op_data) {
++ dlm_release_plock_op(op);
++ rv = -ENOMEM;
++ goto out;
++ }
++
+ /* fl_owner is lockd which doesn't distinguish
+ processes on the nfs client */
+ op->info.owner = (__u64) fl->fl_pid;
+- op->callback = fl->fl_lmops->lm_grant;
+- locks_init_lock(&xop->flc);
+- locks_copy_lock(&xop->flc, fl);
+- xop->fl = fl;
+- xop->file = file;
++ op_data->callback = fl->fl_lmops->lm_grant;
++ locks_init_lock(&op_data->flc);
++ locks_copy_lock(&op_data->flc, fl);
++ op_data->fl = fl;
++ op_data->file = file;
++
++ op->data = op_data;
+ } else {
+ op->info.owner = (__u64)(long) fl->fl_owner;
+ }
+
+ send_op(op);
+
+- if (!op->callback) {
++ if (!op->data) {
+ rv = wait_event_interruptible(recv_wq, (op->done != 0));
+ if (rv == -ERESTARTSYS) {
+ log_debug(ls, "dlm_posix_lock: wait killed %llx",
+@@ -151,7 +166,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
+- kfree(xop);
++ dlm_release_plock_op(op);
+ do_unlock_close(ls, number, file, fl);
+ goto out;
+ }
+@@ -176,7 +191,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ (unsigned long long)number);
+ }
+
+- kfree(xop);
++ dlm_release_plock_op(op);
+ out:
+ dlm_put_lockspace(ls);
+ return rv;
+@@ -186,11 +201,11 @@ EXPORT_SYMBOL_GPL(dlm_posix_lock);
+ /* Returns failure iff a successful lock operation should be canceled */
+ static int dlm_plock_callback(struct plock_op *op)
+ {
++ struct plock_async_data *op_data = op->data;
+ struct file *file;
+ struct file_lock *fl;
+ struct file_lock *flc;
+ int (*notify)(struct file_lock *fl, int result) = NULL;
+- struct plock_xop *xop = (struct plock_xop *)op;
+ int rv = 0;
+
+ spin_lock(&ops_lock);
+@@ -202,10 +217,10 @@ static int dlm_plock_callback(struct plock_op *op)
+ spin_unlock(&ops_lock);
+
+ /* check if the following 2 are still valid or make a copy */
+- file = xop->file;
+- flc = &xop->flc;
+- fl = xop->fl;
+- notify = op->callback;
++ file = op_data->file;
++ flc = &op_data->flc;
++ fl = op_data->fl;
++ notify = op_data->callback;
+
+ if (op->info.rv) {
+ notify(fl, op->info.rv);
+@@ -236,7 +251,7 @@ static int dlm_plock_callback(struct plock_op *op)
+ }
+
+ out:
+- kfree(xop);
++ dlm_release_plock_op(op);
+ return rv;
+ }
+
+@@ -306,7 +321,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ rv = 0;
+
+ out_free:
+- kfree(op);
++ dlm_release_plock_op(op);
+ out:
+ dlm_put_lockspace(ls);
+ fl->fl_flags = fl_flags;
+@@ -374,7 +389,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ rv = 0;
+ }
+
+- kfree(op);
++ dlm_release_plock_op(op);
+ out:
+ dlm_put_lockspace(ls);
+ return rv;
+@@ -410,7 +425,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ (the process did not make an unlock call). */
+
+ if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+- kfree(op);
++ dlm_release_plock_op(op);
+
+ if (copy_to_user(u, &info, sizeof(info)))
+ return -EFAULT;
+@@ -442,7 +457,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ op->info.owner == info.owner) {
+ list_del_init(&op->list);
+ memcpy(&op->info, &info, sizeof(info));
+- if (op->callback)
++ if (op->data)
+ do_callback = 1;
+ else
+ op->done = 1;
+--
+2.39.2
+
--- /dev/null
+From 46d926a79b735c18392213f54fe1a28232de8974 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:33 -0400
+Subject: dlm: rearrange async condition return
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit a800ba77fd285c6391a82819867ac64e9ab3af46 ]
+
+This patch moves the return of FILE_LOCK_DEFERRED a little bit earlier
+than checking afterwards again if the request was an asynchronous request.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 59e45c758ca1 ("fs: dlm: interrupt posix locks only when process is killed")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 27 +++++++++++++--------------
+ 1 file changed, 13 insertions(+), 14 deletions(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 7912ef3706e26..54ed11013d062 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -152,26 +152,25 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ op_data->file = file;
+
+ op->data = op_data;
++
++ send_op(op);
++ rv = FILE_LOCK_DEFERRED;
++ goto out;
+ } else {
+ op->info.owner = (__u64)(long) fl->fl_owner;
+ }
+
+ send_op(op);
+
+- if (!op->data) {
+- rv = wait_event_interruptible(recv_wq, (op->done != 0));
+- if (rv == -ERESTARTSYS) {
+- log_debug(ls, "dlm_posix_lock: wait killed %llx",
+- (unsigned long long)number);
+- spin_lock(&ops_lock);
+- list_del(&op->list);
+- spin_unlock(&ops_lock);
+- dlm_release_plock_op(op);
+- do_unlock_close(ls, number, file, fl);
+- goto out;
+- }
+- } else {
+- rv = FILE_LOCK_DEFERRED;
++ rv = wait_event_interruptible(recv_wq, (op->done != 0));
++ if (rv == -ERESTARTSYS) {
++ log_debug(ls, "%s: wait killed %llx", __func__,
++ (unsigned long long)number);
++ spin_lock(&ops_lock);
++ list_del(&op->list);
++ spin_unlock(&ops_lock);
++ dlm_release_plock_op(op);
++ do_unlock_close(ls, number, file, fl);
+ goto out;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 013c513737dd88add8bb2b47564e14394bf32de1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 09:31:23 +0800
+Subject: ext4: Fix reusing stale buffer heads from last failed mounting
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+[ Upstream commit 26fb5290240dc31cae99b8b4dd2af7f46dfcba6b ]
+
+Following process makes ext4 load stale buffer heads from last failed
+mounting in a new mounting operation:
+mount_bdev
+ ext4_fill_super
+ | ext4_load_and_init_journal
+ | ext4_load_journal
+ | jbd2_journal_load
+ | load_superblock
+ | journal_get_superblock
+ | set_buffer_verified(bh) // buffer head is verified
+ | jbd2_journal_recover // failed caused by EIO
+ | goto failed_mount3a // skip 'sb->s_root' initialization
+ deactivate_locked_super
+ kill_block_super
+ generic_shutdown_super
+ if (sb->s_root)
+ // false, skip ext4_put_super->invalidate_bdev->
+ // invalidate_mapping_pages->mapping_evict_folio->
+ // filemap_release_folio->try_to_free_buffers, which
+ // cannot drop buffer head.
+ blkdev_put
+ blkdev_put_whole
+ if (atomic_dec_and_test(&bdev->bd_openers))
+ // false, systemd-udev happens to open the device. Then
+ // blkdev_flush_mapping->kill_bdev->truncate_inode_pages->
+ // truncate_inode_folio->truncate_cleanup_folio->
+ // folio_invalidate->block_invalidate_folio->
+ // filemap_release_folio->try_to_free_buffers will be skipped,
+ // dropping buffer head is missed again.
+
+Second mount:
+ext4_fill_super
+ ext4_load_and_init_journal
+ ext4_load_journal
+ ext4_get_journal
+ jbd2_journal_init_inode
+ journal_init_common
+ bh = getblk_unmovable
+ bh = __find_get_block // Found stale bh in last failed mounting
+ journal->j_sb_buffer = bh
+ jbd2_journal_load
+ load_superblock
+ journal_get_superblock
+ if (buffer_verified(bh))
+ // true, skip journal->j_format_version = 2, value is 0
+ jbd2_journal_recover
+ do_one_pass
+ next_log_block += count_tags(journal, bh)
+ // According to journal_tag_bytes(), 'tag_bytes' calculating is
+ // affected by jbd2_has_feature_csum3(), jbd2_has_feature_csum3()
+ // returns false because 'j->j_format_version >= 2' is not true,
+ // then we get wrong next_log_block. The do_one_pass may exit
+ // early when encountering a non JBD2_MAGIC_NUMBER in 'next_log_block'.
+
+The filesystem is corrupted here, journal is partially replayed, and
+new journal sequence number actually is already used by last mounting.
+
+The invalidate_bdev() can drop all buffer heads even racing with bare
+reading block device(eg. systemd-udev), so we can fix it by invalidating
+bdev in error handling path in __ext4_fill_super().
+
+Fetch a reproducer in [Link].
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217171
+Fixes: 25ed6e8a54df ("jbd2: enable journal clients to enable v2 checksumming")
+Cc: stable@vger.kernel.org # v3.5
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230315013128.3911115-2-chengzhihao1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/super.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index da7ca0b73e4b4..926063a6d2321 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -907,6 +907,12 @@ static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
+ struct block_device *bdev;
+ bdev = sbi->s_journal_bdev;
+ if (bdev) {
++ /*
++ * Invalidate the journal device's buffers. We don't want them
++ * floating about in memory - the physical journal device may
++ * hotswapped, and it breaks the `ro-after' testing code.
++ */
++ invalidate_bdev(bdev);
+ ext4_blkdev_put(bdev);
+ sbi->s_journal_bdev = NULL;
+ }
+@@ -1033,13 +1039,7 @@ static void ext4_put_super(struct super_block *sb)
+ sync_blockdev(sb->s_bdev);
+ invalidate_bdev(sb->s_bdev);
+ if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
+- /*
+- * Invalidate the journal device's buffers. We don't want them
+- * floating about in memory - the physical journal device may
+- * hotswapped, and it breaks the `ro-after' testing code.
+- */
+ sync_blockdev(sbi->s_journal_bdev);
+- invalidate_bdev(sbi->s_journal_bdev);
+ ext4_blkdev_remove(sbi);
+ }
+ if (sbi->s_ea_inode_cache) {
+@@ -4673,6 +4673,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ ext4_blkdev_remove(sbi);
+ brelse(bh);
+ out_fail:
++ invalidate_bdev(sb->s_bdev);
+ sb->s_fs_info = NULL;
+ kfree(sbi->s_blockgroup_lock);
+ out_free_base:
+--
+2.39.2
+
--- /dev/null
+From b5565f69000cac0fca2cef25238532400a33395d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Sep 2020 11:03:42 +0800
+Subject: ext4: rename journal_dev to s_journal_dev inside ext4_sb_info
+
+From: Chunguang Xu <brookxu@tencent.com>
+
+[ Upstream commit ee7ed3aa0f08621dbf897d2a98dc6f2c7e7d0335 ]
+
+Rename journal_dev to s_journal_dev inside ext4_sb_info, keep
+the naming rules consistent with other variables, which is
+convenient for code reading and writing.
+
+Signed-off-by: Chunguang Xu <brookxu@tencent.com>
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Link: https://lore.kernel.org/r/1600916623-544-1-git-send-email-brookxu@tencent.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 26fb5290240d ("ext4: Fix reusing stale buffer heads from last failed mounting")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 2 +-
+ fs/ext4/fsmap.c | 8 ++++----
+ fs/ext4/super.c | 14 +++++++-------
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 19e2a52d1e5a1..909f231a387d7 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1412,7 +1412,7 @@ struct ext4_sb_info {
+ unsigned long s_commit_interval;
+ u32 s_max_batch_time;
+ u32 s_min_batch_time;
+- struct block_device *journal_bdev;
++ struct block_device *s_journal_bdev;
+ #ifdef CONFIG_QUOTA
+ /* Names of quota files with journalled quota */
+ char __rcu *s_qf_names[EXT4_MAXQUOTAS];
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index 6b52ace1463c2..69c76327792e0 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -576,8 +576,8 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
+ if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
+ fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
+ return true;
+- if (EXT4_SB(sb)->journal_bdev &&
+- fm->fmr_device == new_encode_dev(EXT4_SB(sb)->journal_bdev->bd_dev))
++ if (EXT4_SB(sb)->s_journal_bdev &&
++ fm->fmr_device == new_encode_dev(EXT4_SB(sb)->s_journal_bdev->bd_dev))
+ return true;
+ return false;
+ }
+@@ -647,9 +647,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
+ memset(handlers, 0, sizeof(handlers));
+ handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
+ handlers[0].gfd_fn = ext4_getfsmap_datadev;
+- if (EXT4_SB(sb)->journal_bdev) {
++ if (EXT4_SB(sb)->s_journal_bdev) {
+ handlers[1].gfd_dev = new_encode_dev(
+- EXT4_SB(sb)->journal_bdev->bd_dev);
++ EXT4_SB(sb)->s_journal_bdev->bd_dev);
+ handlers[1].gfd_fn = ext4_getfsmap_logdev;
+ }
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ce5abd25eb99c..da7ca0b73e4b4 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -905,10 +905,10 @@ static void ext4_blkdev_put(struct block_device *bdev)
+ static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
+ {
+ struct block_device *bdev;
+- bdev = sbi->journal_bdev;
++ bdev = sbi->s_journal_bdev;
+ if (bdev) {
+ ext4_blkdev_put(bdev);
+- sbi->journal_bdev = NULL;
++ sbi->s_journal_bdev = NULL;
+ }
+ }
+
+@@ -1032,14 +1032,14 @@ static void ext4_put_super(struct super_block *sb)
+
+ sync_blockdev(sb->s_bdev);
+ invalidate_bdev(sb->s_bdev);
+- if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
++ if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
+ /*
+ * Invalidate the journal device's buffers. We don't want them
+ * floating about in memory - the physical journal device may
+ * hotswapped, and it breaks the `ro-after' testing code.
+ */
+- sync_blockdev(sbi->journal_bdev);
+- invalidate_bdev(sbi->journal_bdev);
++ sync_blockdev(sbi->s_journal_bdev);
++ invalidate_bdev(sbi->s_journal_bdev);
+ ext4_blkdev_remove(sbi);
+ }
+ if (sbi->s_ea_inode_cache) {
+@@ -3537,7 +3537,7 @@ int ext4_calculate_overhead(struct super_block *sb)
+ * Add the internal journal blocks whether the journal has been
+ * loaded or not
+ */
+- if (sbi->s_journal && !sbi->journal_bdev)
++ if (sbi->s_journal && !sbi->s_journal_bdev)
+ overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
+ else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
+ /* j_inum for internal journal is non-zero */
+@@ -4848,7 +4848,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
+ be32_to_cpu(journal->j_superblock->s_nr_users));
+ goto out_journal;
+ }
+- EXT4_SB(sb)->journal_bdev = bdev;
++ EXT4_SB(sb)->s_journal_bdev = bdev;
+ ext4_init_journal_params(sb, journal);
+ return journal;
+
+--
+2.39.2
+
--- /dev/null
+From cb6b2e0f21a0785609512ce1698c9ed4c6403947 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 May 2023 11:21:26 -0400
+Subject: fs: dlm: interrupt posix locks only when process is killed
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 59e45c758ca1b9893ac923dd63536da946ac333b ]
+
+If a posix lock request is waiting for a result from user space
+(dlm_controld), do not let it be interrupted unless the process
+is killed. This reverts commit a6b1533e9a57 ("dlm: make posix locks
+interruptible"). The problem with the interruptible change is
+that all locks were cleared on any signal interrupt. If a signal
+was received that did not terminate the process, the process
+could continue running after all its dlm posix locks had been
+cleared. A future patch will add cancelation to allow proper
+interruption.
+
+Cc: stable@vger.kernel.org
+Fixes: a6b1533e9a57 ("dlm: make posix locks interruptible")
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/plock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index 54ed11013d062..9fef426ce6f41 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -162,7 +162,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+
+ send_op(op);
+
+- rv = wait_event_interruptible(recv_wq, (op->done != 0));
++ rv = wait_event_killable(recv_wq, (op->done != 0));
+ if (rv == -ERESTARTSYS) {
+ log_debug(ls, "%s: wait killed %llx", __func__,
+ (unsigned long long)number);
+--
+2.39.2
+
--- /dev/null
+From 7fce63a1f7c66137411c9d5d200121ccdb1bba92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Oct 2019 14:38:07 -0400
+Subject: ftrace: Add information on number of page groups allocated
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit da537f0aef1372c5204356a7df06be8769467b7b ]
+
+Looking for ways to shrink the size of the dyn_ftrace structure, knowing the
+information about how many pages and the number of groups of those pages, is
+useful in working out the best ways to save on memory.
+
+This adds one info print on how many groups of pages were used to allocate
+the ftrace dyn_ftrace structures, and also shows the number of pages and
+groups in the dyn_ftrace_total_info (which is used for debugging).
+
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Stable-dep-of: 26efd79c4624 ("ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ftrace.c | 14 ++++++++++++++
+ kernel/trace/trace.c | 21 +++++++++++++++------
+ kernel/trace/trace.h | 2 ++
+ 3 files changed, 31 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 5c0463dbe16ee..48ab4d750c650 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2915,6 +2915,8 @@ static void ftrace_shutdown_sysctl(void)
+
+ static u64 ftrace_update_time;
+ unsigned long ftrace_update_tot_cnt;
++unsigned long ftrace_number_of_pages;
++unsigned long ftrace_number_of_groups;
+
+ static inline int ops_traces_mod(struct ftrace_ops *ops)
+ {
+@@ -3039,6 +3041,9 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+ goto again;
+ }
+
++ ftrace_number_of_pages += 1 << order;
++ ftrace_number_of_groups++;
++
+ cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
+ pg->size = cnt;
+
+@@ -3094,6 +3099,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
+ start_pg = pg->next;
+ kfree(pg);
+ pg = start_pg;
++ ftrace_number_of_pages -= 1 << order;
++ ftrace_number_of_groups--;
+ }
+ pr_info("ftrace: FAILED to allocate memory for functions\n");
+ return NULL;
+@@ -5839,6 +5846,8 @@ void ftrace_release_mod(struct module *mod)
+ free_pages((unsigned long)pg->records, order);
+ tmp_page = pg->next;
+ kfree(pg);
++ ftrace_number_of_pages -= 1 << order;
++ ftrace_number_of_groups--;
+ }
+ }
+
+@@ -6184,6 +6193,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ *last_pg = pg->next;
+ order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+ free_pages((unsigned long)pg->records, order);
++ ftrace_number_of_pages -= 1 << order;
++ ftrace_number_of_groups--;
+ kfree(pg);
+ pg = container_of(last_pg, struct ftrace_page, next);
+ if (!(*last_pg))
+@@ -6239,6 +6250,9 @@ void __init ftrace_init(void)
+ __start_mcount_loc,
+ __stop_mcount_loc);
+
++ pr_info("ftrace: allocated %ld pages with %ld groups\n",
++ ftrace_number_of_pages, ftrace_number_of_groups);
++
+ set_ftrace_early_filters();
+
+ return;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 98abff0462366..9da7b10e56d23 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7107,14 +7107,23 @@ static ssize_t
+ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+ {
+- unsigned long *p = filp->private_data;
+- char buf[64]; /* Not too big for a shallow stack */
++ ssize_t ret;
++ char *buf;
+ int r;
+
+- r = scnprintf(buf, 63, "%ld", *p);
+- buf[r++] = '\n';
++ /* 256 should be plenty to hold the amount needed */
++ buf = kmalloc(256, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
+
+- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++ r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
++ ftrace_update_tot_cnt,
++ ftrace_number_of_pages,
++ ftrace_number_of_groups);
++
++ ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++ kfree(buf);
++ return ret;
+ }
+
+ static const struct file_operations tracing_dyn_info_fops = {
+@@ -8246,7 +8255,7 @@ static __init int tracer_init_tracefs(void)
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
+- &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
++ NULL, &tracing_dyn_info_fops);
+ #endif
+
+ create_trace_instances(d_tracer);
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 0923d1b18d1fb..f4d83b552a477 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -748,6 +748,8 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ extern unsigned long ftrace_update_tot_cnt;
++extern unsigned long ftrace_number_of_pages;
++extern unsigned long ftrace_number_of_groups;
+ void ftrace_init_trace_array(struct trace_array *tr);
+ #else
+ static inline void ftrace_init_trace_array(struct trace_array *tr) { }
+--
+2.39.2
+
--- /dev/null
+From 82d0c3cbc9c2961a91d2463724758e7895fd11ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Mar 2021 09:58:38 -0400
+Subject: ftrace: Check if pages were allocated before calling free_pages()
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit 59300b36f85f254260c81d9dd09195fa49eb0f98 ]
+
+It is possible that on error pg->size can be zero when getting its order,
+which would return a -1 value. It is dangerous to pass in an order of -1
+to free_pages(). Check if order is greater than or equal to zero before
+calling free_pages().
+
+Link: https://lore.kernel.org/lkml/20210330093916.432697c7@gandalf.local.home/
+
+Reported-by: Abaci Robot <abaci@linux.alibaba.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Stable-dep-of: 26efd79c4624 ("ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ftrace.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 48ab4d750c650..1b92a22086f50 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3095,7 +3095,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
+ pg = start_pg;
+ while (pg) {
+ order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- free_pages((unsigned long)pg->records, order);
++ if (order >= 0)
++ free_pages((unsigned long)pg->records, order);
+ start_pg = pg->next;
+ kfree(pg);
+ pg = start_pg;
+@@ -5843,7 +5844,8 @@ void ftrace_release_mod(struct module *mod)
+ clear_mod_from_hashes(pg);
+
+ order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- free_pages((unsigned long)pg->records, order);
++ if (order >= 0)
++ free_pages((unsigned long)pg->records, order);
+ tmp_page = pg->next;
+ kfree(pg);
+ ftrace_number_of_pages -= 1 << order;
+@@ -6192,7 +6194,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ if (!pg->index) {
+ *last_pg = pg->next;
+ order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- free_pages((unsigned long)pg->records, order);
++ if (order >= 0)
++ free_pages((unsigned long)pg->records, order);
+ ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
+ kfree(pg);
+--
+2.39.2
+
--- /dev/null
+From 8e471f5b39d2372159b1ad251d64b5bcf1158da6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jul 2023 14:04:52 +0800
+Subject: ftrace: Fix possible warning on checking all pages used in
+ ftrace_process_locs()
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit 26efd79c4624294e553aeaa3439c646729bad084 ]
+
+As comments in ftrace_process_locs(), there may be NULL pointers in
+mcount_loc section:
+ > Some architecture linkers will pad between
+ > the different mcount_loc sections of different
+ > object files to satisfy alignments.
+ > Skip any NULL pointers.
+
+After commit 20e5227e9f55 ("ftrace: allow NULL pointers in mcount_loc"),
+NULL pointers will be accounted when allocating ftrace pages but skipped
+before adding into ftrace pages, this may result in some pages not being
+used. Then after commit 706c81f87f84 ("ftrace: Remove extra helper
+functions"), warning may occur at:
+ WARN_ON(pg->next);
+
+To fix it, only warn for case that no pointers skipped but pages not used
+up, then free those unused pages after releasing ftrace_lock.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230712060452.3175675-1-zhengyejian1@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: 706c81f87f84 ("ftrace: Remove extra helper functions")
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ftrace.c | 45 +++++++++++++++++++++++++++++--------------
+ 1 file changed, 31 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 6b1ba7f510e2c..81f5c9c85d066 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3053,6 +3053,22 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+ return cnt;
+ }
+
++static void ftrace_free_pages(struct ftrace_page *pages)
++{
++ struct ftrace_page *pg = pages;
++
++ while (pg) {
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
++ pages = pg->next;
++ kfree(pg);
++ pg = pages;
++ ftrace_number_of_groups--;
++ }
++}
++
+ static struct ftrace_page *
+ ftrace_allocate_pages(unsigned long num_to_init)
+ {
+@@ -3091,17 +3107,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
+ return start_pg;
+
+ free_pages:
+- pg = start_pg;
+- while (pg) {
+- if (pg->records) {
+- free_pages((unsigned long)pg->records, pg->order);
+- ftrace_number_of_pages -= 1 << pg->order;
+- }
+- start_pg = pg->next;
+- kfree(pg);
+- pg = start_pg;
+- ftrace_number_of_groups--;
+- }
++ ftrace_free_pages(start_pg);
+ pr_info("ftrace: FAILED to allocate memory for functions\n");
+ return NULL;
+ }
+@@ -5593,9 +5599,11 @@ static int ftrace_process_locs(struct module *mod,
+ unsigned long *start,
+ unsigned long *end)
+ {
++ struct ftrace_page *pg_unuse = NULL;
+ struct ftrace_page *start_pg;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
++ unsigned long skipped = 0;
+ unsigned long count;
+ unsigned long *p;
+ unsigned long addr;
+@@ -5649,8 +5657,10 @@ static int ftrace_process_locs(struct module *mod,
+ * object files to satisfy alignments.
+ * Skip any NULL pointers.
+ */
+- if (!addr)
++ if (!addr) {
++ skipped++;
+ continue;
++ }
+
+ end_offset = (pg->index+1) * sizeof(pg->records[0]);
+ if (end_offset > PAGE_SIZE << pg->order) {
+@@ -5664,8 +5674,10 @@ static int ftrace_process_locs(struct module *mod,
+ rec->ip = addr;
+ }
+
+- /* We should have used all pages */
+- WARN_ON(pg->next);
++ if (pg->next) {
++ pg_unuse = pg->next;
++ pg->next = NULL;
++ }
+
+ /* Assign the last page to ftrace_pages */
+ ftrace_pages = pg;
+@@ -5687,6 +5699,11 @@ static int ftrace_process_locs(struct module *mod,
+ out:
+ mutex_unlock(&ftrace_lock);
+
++ /* We should have used all pages unless we skipped some */
++ if (pg_unuse) {
++ WARN_ON(!skipped);
++ ftrace_free_pages(pg_unuse);
++ }
+ return ret;
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 7307e1624861159322352b2461909f5aa35af51f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 1 Apr 2021 16:14:17 -0400
+Subject: ftrace: Store the order of pages allocated in ftrace_page
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit db42523b4f3e83ff86b53cdda219a9767c8b047f ]
+
+Instead of saving the size of the records field of the ftrace_page, store
+the order it uses to allocate the pages, as that is what is needed to know
+in order to free the pages. This simplifies the code.
+
+Link: https://lore.kernel.org/lkml/CAHk-=whyMxheOqXAORt9a7JK9gc9eHTgCJ55Pgs4p=X3RrQubQ@mail.gmail.com/
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[ change log written by Steven Rostedt ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Stable-dep-of: 26efd79c4624 ("ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ftrace.c | 35 +++++++++++++++++------------------
+ 1 file changed, 17 insertions(+), 18 deletions(-)
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 1b92a22086f50..6b1ba7f510e2c 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1124,7 +1124,7 @@ struct ftrace_page {
+ struct ftrace_page *next;
+ struct dyn_ftrace *records;
+ int index;
+- int size;
++ int order;
+ };
+
+ #define ENTRY_SIZE sizeof(struct dyn_ftrace)
+@@ -3045,7 +3045,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+ ftrace_number_of_groups++;
+
+ cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
+- pg->size = cnt;
++ pg->order = order;
+
+ if (cnt > count)
+ cnt = count;
+@@ -3058,7 +3058,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
+ {
+ struct ftrace_page *start_pg;
+ struct ftrace_page *pg;
+- int order;
+ int cnt;
+
+ if (!num_to_init)
+@@ -3094,13 +3093,13 @@ ftrace_allocate_pages(unsigned long num_to_init)
+ free_pages:
+ pg = start_pg;
+ while (pg) {
+- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- if (order >= 0)
+- free_pages((unsigned long)pg->records, order);
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
+ start_pg = pg->next;
+ kfree(pg);
+ pg = start_pg;
+- ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
+ }
+ pr_info("ftrace: FAILED to allocate memory for functions\n");
+@@ -5642,6 +5641,7 @@ static int ftrace_process_locs(struct module *mod,
+ p = start;
+ pg = start_pg;
+ while (p < end) {
++ unsigned long end_offset;
+ addr = ftrace_call_adjust(*p++);
+ /*
+ * Some architecture linkers will pad between
+@@ -5652,7 +5652,8 @@ static int ftrace_process_locs(struct module *mod,
+ if (!addr)
+ continue;
+
+- if (pg->index == pg->size) {
++ end_offset = (pg->index+1) * sizeof(pg->records[0]);
++ if (end_offset > PAGE_SIZE << pg->order) {
+ /* We should have allocated enough */
+ if (WARN_ON(!pg->next))
+ break;
+@@ -5792,7 +5793,6 @@ void ftrace_release_mod(struct module *mod)
+ struct ftrace_page **last_pg;
+ struct ftrace_page *tmp_page = NULL;
+ struct ftrace_page *pg;
+- int order;
+
+ mutex_lock(&ftrace_lock);
+
+@@ -5843,12 +5843,12 @@ void ftrace_release_mod(struct module *mod)
+ /* Needs to be called outside of ftrace_lock */
+ clear_mod_from_hashes(pg);
+
+- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- if (order >= 0)
+- free_pages((unsigned long)pg->records, order);
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
+ tmp_page = pg->next;
+ kfree(pg);
+- ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
+ }
+ }
+@@ -6155,7 +6155,6 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ struct ftrace_mod_map *mod_map = NULL;
+ struct ftrace_init_func *func, *func_next;
+ struct list_head clear_hash;
+- int order;
+
+ INIT_LIST_HEAD(&clear_hash);
+
+@@ -6193,10 +6192,10 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ ftrace_update_tot_cnt--;
+ if (!pg->index) {
+ *last_pg = pg->next;
+- order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+- if (order >= 0)
+- free_pages((unsigned long)pg->records, order);
+- ftrace_number_of_pages -= 1 << order;
++ if (pg->records) {
++ free_pages((unsigned long)pg->records, pg->order);
++ ftrace_number_of_pages -= 1 << pg->order;
++ }
+ ftrace_number_of_groups--;
+ kfree(pg);
+ pg = container_of(last_pg, struct ftrace_page, next);
+--
+2.39.2
+
--- /dev/null
+From 49ddd59e08ff8824348bfed0db0441cf6088752a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jul 2023 14:34:25 +0200
+Subject: gpio: tps68470: Make tps68470_gpio_output() always set the initial
+ value
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit 5a7adc6c1069ce31ef4f606ae9c05592c80a6ab5 ]
+
+Make tps68470_gpio_output() call tps68470_gpio_set() for output-only pins
+too, so that the initial value passed to gpiod_direction_output() is
+honored for these pins too.
+
+Fixes: 275b13a65547 ("gpio: Add support for TPS68470 GPIOs")
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Reviewed-by: Daniel Scally <dan.scally@ideasonboard.com>
+Tested-by: Daniel Scally <dan.scally@ideasonboard.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-tps68470.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
+index aff6e504c6668..9704cff9b4aa3 100644
+--- a/drivers/gpio/gpio-tps68470.c
++++ b/drivers/gpio/gpio-tps68470.c
+@@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+
++ /* Set the initial value */
++ tps68470_gpio_set(gc, offset, value);
++
+ /* rest are always outputs */
+ if (offset >= TPS68470_N_REGULAR_GPIO)
+ return 0;
+
+- /* Set the initial value */
+- tps68470_gpio_set(gc, offset, value);
+-
+ return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
+ TPS68470_GPIO_MODE_MASK,
+ TPS68470_GPIO_MODE_OUT_CMOS);
+--
+2.39.2
+
--- /dev/null
+From acdac95be473026b932ce7398769cc8dac26b582 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 May 2023 11:39:23 +0300
+Subject: PCI/ASPM: Avoid link retraining race
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit e7e39756363ad5bd83ddeae1063193d0f13870fd ]
+
+PCIe r6.0.1, sec 7.5.3.7, recommends setting the link control parameters,
+then waiting for the Link Training bit to be clear before setting the
+Retrain Link bit.
+
+This avoids a race where the LTSSM may not use the updated parameters if it
+is already in the midst of link training because of other normal link
+activity.
+
+Wait for the Link Training bit to be clear before toggling the Retrain Link
+bit to ensure that the LTSSM uses the updated link control parameters.
+
+[bhelgaas: commit log, return 0 (success)/-ETIMEDOUT instead of bool for
+both pcie_wait_for_retrain() and the existing pcie_retrain_link()]
+Suggested-by: Lukas Wunner <lukas@wunner.de>
+Fixes: 7d715a6c1ae5 ("PCI: add PCI Express ASPM support")
+Link: https://lore.kernel.org/r/20230502083923.34562-1-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pcie/aspm.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 7b1fb6cb16fba..eec62f7377f48 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -223,8 +223,19 @@ static int pcie_wait_for_retrain(struct pci_dev *pdev)
+ static int pcie_retrain_link(struct pcie_link_state *link)
+ {
+ struct pci_dev *parent = link->pdev;
++ int rc;
+ u16 reg16;
+
++ /*
++ * Ensure the updated LNKCTL parameters are used during link
++ * training by checking that there is no ongoing link training to
++ * avoid LTSSM race as recommended in Implementation Note at the
++ * end of PCIe r6.0.1 sec 7.5.3.7.
++ */
++ rc = pcie_wait_for_retrain(parent);
++ if (rc)
++ return rc;
++
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ reg16 |= PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+--
+2.39.2
+
--- /dev/null
+From a03e2f07f1ad1ad75001fb6610db774fa09566ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 14:49:33 -0500
+Subject: PCI/ASPM: Factor out pcie_wait_for_retrain()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 9c7f136433d26592cb4d9cd00b4e15c33d9797c6 ]
+
+Factor pcie_wait_for_retrain() out from pcie_retrain_link(). No functional
+change intended.
+
+[bhelgaas: split out from
+https://lore.kernel.org/r/20230502083923.34562-1-ilpo.jarvinen@linux.intel.com]
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Stable-dep-of: e7e39756363a ("PCI/ASPM: Avoid link retraining race")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pcie/aspm.c | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 86bf1356cfa3f..7b1fb6cb16fba 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -203,10 +203,26 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+ link->clkpm_disable = blacklist ? 1 : 0;
+ }
+
++static int pcie_wait_for_retrain(struct pci_dev *pdev)
++{
++ unsigned long end_jiffies;
++ u16 reg16;
++
++ /* Wait for Link Training to be cleared by hardware */
++ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
++ do {
++ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &reg16);
++ if (!(reg16 & PCI_EXP_LNKSTA_LT))
++ return 0;
++ msleep(1);
++ } while (time_before(jiffies, end_jiffies));
++
++ return -ETIMEDOUT;
++}
++
+ static int pcie_retrain_link(struct pcie_link_state *link)
+ {
+ struct pci_dev *parent = link->pdev;
+- unsigned long end_jiffies;
+ u16 reg16;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+@@ -222,17 +238,7 @@ static int pcie_retrain_link(struct pcie_link_state *link)
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ }
+
+- /* Wait for link training end. Break out after waiting for timeout */
+- end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+- do {
+- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+- if (!(reg16 & PCI_EXP_LNKSTA_LT))
+- break;
+- msleep(1);
+- } while (time_before(jiffies, end_jiffies));
+- if (reg16 & PCI_EXP_LNKSTA_LT)
+- return -ETIMEDOUT;
+- return 0;
++ return pcie_wait_for_retrain(parent);
+ }
+
+ /*
+--
+2.39.2
+
--- /dev/null
+From 324cda3281215817b11bd3c95277c25a6c02a063 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jun 2023 14:44:55 -0500
+Subject: PCI/ASPM: Return 0 or -ETIMEDOUT from pcie_retrain_link()
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit f5297a01ee805d7fa569d288ed65fc0f9ac9b03d ]
+
+"pcie_retrain_link" is not a question with a true/false answer, so "bool"
+isn't quite the right return type. Return 0 for success or -ETIMEDOUT if
+the retrain failed. No functional change intended.
+
+[bhelgaas: based on Ilpo's patch below]
+Link: https://lore.kernel.org/r/20230502083923.34562-1-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Stable-dep-of: e7e39756363a ("PCI/ASPM: Avoid link retraining race")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pcie/aspm.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 598e246fa70ed..86bf1356cfa3f 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -203,7 +203,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+ link->clkpm_disable = blacklist ? 1 : 0;
+ }
+
+-static bool pcie_retrain_link(struct pcie_link_state *link)
++static int pcie_retrain_link(struct pcie_link_state *link)
+ {
+ struct pci_dev *parent = link->pdev;
+ unsigned long end_jiffies;
+@@ -230,7 +230,9 @@ static bool pcie_retrain_link(struct pcie_link_state *link)
+ break;
+ msleep(1);
+ } while (time_before(jiffies, end_jiffies));
+- return !(reg16 & PCI_EXP_LNKSTA_LT);
++ if (reg16 & PCI_EXP_LNKSTA_LT)
++ return -ETIMEDOUT;
++ return 0;
+ }
+
+ /*
+@@ -299,15 +301,15 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+ reg16 &= ~PCI_EXP_LNKCTL_CCC;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+
+- if (pcie_retrain_link(link))
+- return;
++ if (pcie_retrain_link(link)) {
+
+- /* Training failed. Restore common clock configurations */
+- pci_err(parent, "ASPM: Could not configure common clock\n");
+- list_for_each_entry(child, &linkbus->devices, bus_list)
+- pcie_capability_write_word(child, PCI_EXP_LNKCTL,
++ /* Training failed. Restore common clock configurations */
++ pci_err(parent, "ASPM: Could not configure common clock\n");
++ list_for_each_entry(child, &linkbus->devices, bus_list)
++ pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
+- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
++ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
++ }
+ }
+
+ /* Convert L0s latency encoding to ns */
+--
+2.39.2
+
--- /dev/null
+From 5f19ed01df565cd694687ba28e07661f34956445 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Mar 2019 18:07:36 +0100
+Subject: PCI: Rework pcie_retrain_link() wait loop
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Stefan Mätje <stefan.maetje@esd.eu>
+
+[ Upstream commit 658eec837b11fbfab9082ebf8da24d94cefa47c0 ]
+
+Transform wait code to a "do {} while (time_before())" loop as recommended
+by reviewer. No functional change intended.
+
+Signed-off-by: Stefan Mätje <stefan.maetje@esd.eu>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Stable-dep-of: e7e39756363a ("PCI/ASPM: Avoid link retraining race")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pcie/aspm.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 279f9f0197b01..598e246fa70ed 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -206,7 +206,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+ static bool pcie_retrain_link(struct pcie_link_state *link)
+ {
+ struct pci_dev *parent = link->pdev;
+- unsigned long start_jiffies;
++ unsigned long end_jiffies;
+ u16 reg16;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+@@ -223,15 +223,13 @@ static bool pcie_retrain_link(struct pcie_link_state *link)
+ }
+
+ /* Wait for link training end. Break out after waiting for timeout */
+- start_jiffies = jiffies;
+- for (;;) {
++ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
++ do {
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+- break;
+ msleep(1);
+- }
++ } while (time_before(jiffies, end_jiffies));
+ return !(reg16 & PCI_EXP_LNKSTA_LT);
+ }
+
+--
+2.39.2
+
--- /dev/null
+From 049b89b0a8970f9cb4a0e4bbfc4b6172c0e1f026 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jun 2023 17:08:36 +0530
+Subject: scsi: qla2xxx: Array index may go out of bound
+
+From: Nilesh Javali <njavali@marvell.com>
+
+[ Upstream commit d721b591b95cf3f290f8a7cbe90aa2ee0368388d ]
+
+Klocwork reports array 'vha->host_str' of size 16 may use index value(s)
+16..19. Use snprintf() instead of sprintf().
+
+Cc: stable@vger.kernel.org
+Co-developed-by: Bikash Hazarika <bhazarika@marvell.com>
+Signed-off-by: Bikash Hazarika <bhazarika@marvell.com>
+Signed-off-by: Nilesh Javali <njavali@marvell.com>
+Link: https://lore.kernel.org/r/20230607113843.37185-2-njavali@marvell.com
+Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla2xxx/qla_os.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 73f3e51ce9798..4580774b2c3e7 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -4604,7 +4604,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ }
+ INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+
+- sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
++ snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu",
++ QLA2XXX_DRIVER_NAME, vha->host_no);
+ ql_dbg(ql_dbg_init, vha, 0x0041,
+ "Allocated the host=%p hw=%p vha=%p dev_name=%s",
+ vha->host, vha->hw, vha,
+--
+2.39.2
+
--- /dev/null
+From 4892b2e047eea288d05d9f8f0d5438e8367fd5cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Sep 2020 10:25:14 +0800
+Subject: scsi: qla2xxx: Fix inconsistent format argument type in qla_os.c
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit 250bd00923c72c846092271a9e51ee373db081b6 ]
+
+Fix the following warnings:
+
+[drivers/scsi/qla2xxx/qla_os.c:4882]: (warning) %ld in format string (no. 2)
+ requires 'long' but the argument type is 'unsigned long'.
+[drivers/scsi/qla2xxx/qla_os.c:5011]: (warning) %ld in format string (no. 1)
+ requires 'long' but the argument type is 'unsigned long'.
+
+Link: https://lore.kernel.org/r/20200930022515.2862532-3-yebin10@huawei.com
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Reviewed-by: Nilesh Javali <njavali@marvell.com>
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: d721b591b95c ("scsi: qla2xxx: Array index may go out of bound")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla2xxx/qla_os.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 7863ad1390f8a..73f3e51ce9798 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -4604,7 +4604,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ }
+ INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+
+- sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
++ sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+ ql_dbg(ql_dbg_init, vha, 0x0041,
+ "Allocated the host=%p hw=%p vha=%p dev_name=%s",
+ vha->host, vha->hw, vha,
+@@ -4735,7 +4735,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
+
+ switch (code) {
+ case QLA_UEVENT_CODE_FW_DUMP:
+- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
++ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
+ vha->host_no);
+ break;
+ default:
+--
+2.39.2
+
tcp-annotate-data-races-around-tp-notsent_lowat.patch
tcp-annotate-data-races-around-fastopenq.max_qlen.patch
tracing-histograms-return-an-error-if-we-fail-to-add-histogram-to-hist_vars-list.patch
+gpio-tps68470-make-tps68470_gpio_output-always-set-t.patch
+bcache-use-max_caches_per_set-instead-of-magic-numbe.patch
+bcache-remove-int-n-from-parameter-list-of-bch_bucke.patch
+bcache-fix-__bch_btree_node_alloc-to-make-the-failur.patch
+btrfs-fix-extent-buffer-leak-after-tree-mod-log-fail.patch
+ext4-rename-journal_dev-to-s_journal_dev-inside-ext4.patch
+ext4-fix-reusing-stale-buffer-heads-from-last-failed.patch
+pci-rework-pcie_retrain_link-wait-loop.patch
+pci-aspm-return-0-or-etimedout-from-pcie_retrain_lin.patch
+pci-aspm-factor-out-pcie_wait_for_retrain.patch
+pci-aspm-avoid-link-retraining-race.patch
+dlm-cleanup-plock_op-vs-plock_xop.patch
+dlm-rearrange-async-condition-return.patch
+fs-dlm-interrupt-posix-locks-only-when-process-is-ki.patch
+ftrace-add-information-on-number-of-page-groups-allo.patch
+ftrace-check-if-pages-were-allocated-before-calling-.patch
+ftrace-store-the-order-of-pages-allocated-in-ftrace_.patch
+ftrace-fix-possible-warning-on-checking-all-pages-us.patch
+scsi-qla2xxx-fix-inconsistent-format-argument-type-i.patch
+scsi-qla2xxx-array-index-may-go-out-of-bound.patch