--- /dev/null
+From 5eeebf927b195eee79fd4c518abdcd900935a0fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Apr 2023 12:17:35 -0400
+Subject: NFS: Cleanup unused rpc_clnt variable
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+[ Upstream commit e025f0a73f6acb920d86549b2177a5883535421d ]
+
+The root rpc_clnt is not used here, clean it up.
+
+Fixes: 4dc73c679114 ("NFSv4: keep state manager thread active if swap is enabled")
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Reviewed-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Stable-dep-of: 956fd46f97d2 ("NFSv4: Fix a state manager thread deadlock regression")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4state.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index ff6ca05a9d441..3fcef19e91984 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1212,10 +1212,6 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ {
+ struct task_struct *task;
+ char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+- struct rpc_clnt *cl = clp->cl_rpcclient;
+-
+- while (cl != cl->cl_parent)
+- cl = cl->cl_parent;
+
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+ if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
+--
+2.40.1
+
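For context on the lines removed above: in SUNRPC a cloned rpc_clnt points at its parent through cl_parent, and the root client points back at itself, which is what made the deleted loop terminate at the root. The standalone model below illustrates that idiom only; it borrows the field name for clarity and is not the SUNRPC implementation.

/* Minimal userspace model of the cl_parent walk removed above.
 * The struct and field names mirror the kernel's rpc_clnt purely for
 * illustration; the root entry points at itself, so the walk stops there.
 */
#include <stdio.h>

struct rpc_clnt_model {
        struct rpc_clnt_model *cl_parent;
        const char *name;
};

static struct rpc_clnt_model *root_client(struct rpc_clnt_model *cl)
{
        while (cl != cl->cl_parent)     /* root points at itself */
                cl = cl->cl_parent;
        return cl;
}

int main(void)
{
        struct rpc_clnt_model root  = { .cl_parent = &root,  .name = "root" };
        struct rpc_clnt_model child = { .cl_parent = &root,  .name = "child" };
        struct rpc_clnt_model grand = { .cl_parent = &child, .name = "grandchild" };

        printf("root of '%s' is '%s'\n", grand.name, root_client(&grand)->name);
        return 0;
}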
--- /dev/null
+From 004883047af44cd1484e7413d0963433daab612c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Jun 2023 14:07:22 -0400
+Subject: NFS: rename nfs_client_kset to nfs_kset
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+[ Upstream commit 8b18a2edecc0741b0eecf8b18fdb356a0f8682de ]
+
+Be brief and match the subsystem name. There's no need to distinguish this
+kset variable from the server.
+
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Stable-dep-of: 956fd46f97d2 ("NFSv4: Fix a state manager thread deadlock regression")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/sysfs.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
+index 8cb70755e3c9e..f7f778e3e5ca7 100644
+--- a/fs/nfs/sysfs.c
++++ b/fs/nfs/sysfs.c
+@@ -18,7 +18,7 @@
+ #include "sysfs.h"
+
+ struct kobject *nfs_client_kobj;
+-static struct kset *nfs_client_kset;
++static struct kset *nfs_kset;
+
+ static void nfs_netns_object_release(struct kobject *kobj)
+ {
+@@ -55,13 +55,13 @@ static struct kobject *nfs_netns_object_alloc(const char *name,
+
+ int nfs_sysfs_init(void)
+ {
+- nfs_client_kset = kset_create_and_add("nfs", NULL, fs_kobj);
+- if (!nfs_client_kset)
++ nfs_kset = kset_create_and_add("nfs", NULL, fs_kobj);
++ if (!nfs_kset)
+ return -ENOMEM;
+- nfs_client_kobj = nfs_netns_object_alloc("net", nfs_client_kset, NULL);
++ nfs_client_kobj = nfs_netns_object_alloc("net", nfs_kset, NULL);
+ if (!nfs_client_kobj) {
+- kset_unregister(nfs_client_kset);
+- nfs_client_kset = NULL;
++ kset_unregister(nfs_kset);
++ nfs_kset = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+@@ -70,7 +70,7 @@ int nfs_sysfs_init(void)
+ void nfs_sysfs_exit(void)
+ {
+ kobject_put(nfs_client_kobj);
+- kset_unregister(nfs_client_kset);
++ kset_unregister(nfs_kset);
+ }
+
+ static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
+@@ -158,7 +158,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p) {
+ p->net = net;
+- p->kobject.kset = nfs_client_kset;
++ p->kobject.kset = nfs_kset;
+ if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type,
+ parent, "nfs_client") == 0)
+ return p;
+--
+2.40.1
+
--- /dev/null
+From 2dd17c2cafc3f03f89013275267c0da0f798795a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 24 Sep 2023 13:14:15 -0400
+Subject: NFSv4: Fix a state manager thread deadlock regression
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 956fd46f97d238032cb5fa4771cdaccc6e760f9a ]
+
+Commit 4dc73c679114 reintroduces the deadlock that was fixed by commit
+aeabb3c96186 ("NFSv4: Fix a NFSv4 state manager deadlock") because it
+prevents the setup of new threads to handle reboot recovery, while the
+older recovery thread is stuck returning delegations.
+
+Fixes: 4dc73c679114 ("NFSv4: keep state manager thread active if swap is enabled")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 4 +++-
+ fs/nfs/nfs4state.c | 36 +++++++++++++++++++++++++-----------
+ 2 files changed, 28 insertions(+), 12 deletions(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c34df51a8f2b7..1c2ed14bccef2 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10408,7 +10408,9 @@ static void nfs4_disable_swap(struct inode *inode)
+ */
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+- nfs4_schedule_state_manager(clp);
++ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
++ wake_up_var(&clp->cl_state);
+ }
+
+ static const struct inode_operations nfs4_dir_inode_operations = {
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 3fcef19e91984..10946b24c66f9 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1212,13 +1212,23 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ {
+ struct task_struct *task;
+ char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
++ struct rpc_clnt *clnt = clp->cl_rpcclient;
++ bool swapon = false;
+
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+- if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
+- wake_up_var(&clp->cl_state);
+- return;
++
++ if (atomic_read(&clnt->cl_swapper)) {
++ swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE,
++ &clp->cl_state);
++ if (!swapon) {
++ wake_up_var(&clp->cl_state);
++ return;
++ }
+ }
+- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
++
++ if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
++ return;
++
+ __module_get(THIS_MODULE);
+ refcount_inc(&clp->cl_count);
+
+@@ -1235,8 +1245,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ __func__, PTR_ERR(task));
+ if (!nfs_client_init_is_complete(clp))
+ nfs_mark_client_ready(clp, PTR_ERR(task));
++ if (swapon)
++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ nfs4_clear_state_manager_bit(clp);
+- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ nfs_put_client(clp);
+ module_put(THIS_MODULE);
+ }
+@@ -2717,22 +2728,25 @@ static int nfs4_run_state_manager(void *ptr)
+
+ allow_signal(SIGKILL);
+ again:
+- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
+ nfs4_state_manager(clp);
+- if (atomic_read(&cl->cl_swapper)) {
++
++ if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) &&
++ !test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) {
+ wait_var_event_interruptible(&clp->cl_state,
+ test_bit(NFS4CLNT_RUN_MANAGER,
+ &clp->cl_state));
+- if (atomic_read(&cl->cl_swapper) &&
+- test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
++ if (!atomic_read(&cl->cl_swapper))
++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
++ if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
++ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ goto again;
+ /* Either no longer a swapper, or were signalled */
++ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ }
+- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+
+ if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+ test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+- !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state))
++ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
+ goto again;
+
+ nfs_put_client(clp);
+--
+2.40.1
+
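The behavioural change above is easiest to see in isolation: NFS4CLNT_MANAGER_RUNNING now tracks whether a state manager thread exists (or is being created), while NFS4CLNT_MANAGER_AVAILABLE is only reserved when swap is enabled, i.e. when the thread is kept parked between runs instead of exiting. The following is a simplified userspace model of that scheduling decision, with the error handling and reference counting omitted; the flag and function names only mirror the kernel's.

/* Simplified model of the patched nfs4_schedule_state_manager() decision.
 * Not kernel code: single-threaded, no atomics, no error paths.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
        RUN_MANAGER       = 1 << 0,
        MANAGER_RUNNING   = 1 << 1,
        MANAGER_AVAILABLE = 1 << 2,
};

static bool test_and_set(unsigned long *state, unsigned long flag)
{
        bool was_set = *state & flag;

        *state |= flag;
        return was_set;
}

/* Returns true when a new manager thread must be spawned. */
static bool schedule_state_manager(unsigned long *state, bool swapper_active)
{
        *state |= RUN_MANAGER;

        if (swapper_active && test_and_set(state, MANAGER_AVAILABLE)) {
                /* A parked swap-time manager already exists: just wake it. */
                printf("wake parked manager\n");
                return false;
        }
        if (test_and_set(state, MANAGER_RUNNING))
                return false;   /* a manager thread already exists */

        printf("spawn new manager thread\n");
        return true;
}

int main(void)
{
        unsigned long state = 0;

        schedule_state_manager(&state, false);  /* spawns a thread */
        schedule_state_manager(&state, false);  /* no-op: already running */
        return 0;
}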
--- /dev/null
+From 95cd4492a69fb98643e1f97913a9e7116dacecf9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Sep 2023 20:54:25 +0800
+Subject: ring-buffer: Fix bytes info in per_cpu buffer stats
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit 45d99ea451d0c30bfd4864f0fe485d7dac014902 ]
+
+The 'bytes' info in the file 'per_cpu/cpu<X>/stats' is the number of
+bytes in the cpu buffer that have not been consumed. However, currently,
+after consuming data by reading the file 'trace_pipe', the 'bytes' info
+is not updated as expected.
+
+ # cat per_cpu/cpu0/stats
+ entries: 0
+ overrun: 0
+ commit overrun: 0
+ bytes: 568 <--- 'bytes' is problematic !!!
+ oldest event ts: 8651.371479
+ now ts: 8653.912224
+ dropped events: 0
+ read events: 8
+
+The root cause is that cpu_buffer->read_bytes is not accounted correctly.
+To fix it:
+ 1. For 'read_bytes', account for the consumed event in rb_advance_reader();
+ 2. For 'entries_bytes', exclude the discarded padding event that is
+    smaller than the minimum size, because it is invisible to the reader.
+    Then use rb_page_commit() instead of BUF_PAGE_SIZE wherever page-based
+    read/remove/overrun is accounted for.
+
+Also correct the comments of ring_buffer_bytes_cpu() in this patch.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230921125425.1708423-1-zhengyejian1@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: c64e148a3be3 ("trace: Add ring buffer stats to measure rate of events")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 682540bd56355..0938222b45988 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -355,6 +355,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
+ local_set(&bpage->commit, 0);
+ }
+
++static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
++{
++ return local_read(&bpage->page->commit);
++}
++
+ static void free_buffer_page(struct buffer_page *bpage)
+ {
+ free_page((unsigned long)bpage->page);
+@@ -1886,7 +1891,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ * Increment overrun to account for the lost events.
+ */
+ local_add(page_entries, &cpu_buffer->overrun);
+- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
++ local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
+ }
+
+@@ -2236,11 +2241,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
+ cpu_buffer->reader_page->read);
+ }
+
+-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
+-{
+- return local_read(&bpage->page->commit);
+-}
+-
+ static struct ring_buffer_event *
+ rb_iter_head_event(struct ring_buffer_iter *iter)
+ {
+@@ -2386,7 +2386,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+ * the counters.
+ */
+ local_add(entries, &cpu_buffer->overrun);
+- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
++ local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
+ local_inc(&cpu_buffer->pages_lost);
+
+ /*
+@@ -2529,9 +2529,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+
+ event = __rb_page_index(tail_page, tail);
+
+- /* account for padding bytes */
+- local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+-
+ /*
+ * Save the original length to the meta data.
+ * This will be used by the reader to add lost event
+@@ -2545,7 +2542,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ * write counter enough to allow another writer to slip
+ * in on this page.
+ * We put in a discarded commit instead, to make sure
+- * that this space is not used again.
++ * that this space is not used again, and this space will
++ * not be accounted into 'entries_bytes'.
+ *
+ * If we are less than the minimum size, we don't need to
+ * worry about it.
+@@ -2570,6 +2568,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ /* time delta must be non zero */
+ event->time_delta = 1;
+
++ /* account for padding bytes */
++ local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
++
+ /* Make sure the padding is visible before the tail_page->write update */
+ smp_wmb();
+
+@@ -3935,7 +3936,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
+ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+
+ /**
+- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
++ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+@@ -4443,6 +4444,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
+
+ length = rb_event_length(event);
+ cpu_buffer->reader_page->read += length;
++ cpu_buffer->read_bytes += length;
+ }
+
+ static void rb_advance_iter(struct ring_buffer_iter *iter)
+@@ -5534,7 +5536,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
+ } else {
+ /* update the entry counter */
+ cpu_buffer->read += rb_page_entries(reader);
+- cpu_buffer->read_bytes += BUF_PAGE_SIZE;
++ cpu_buffer->read_bytes += rb_page_commit(reader);
+
+ /* swap the pages */
+ rb_init_page(bpage);
+--
+2.40.1
+
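The arithmetic behind the 'bytes' stat helps explain the fix: as far as this reads from the surrounding code, ring_buffer_bytes_cpu() reports entries_bytes minus read_bytes, so 'bytes' only shrinks on consumption if read_bytes grows by the length of each consumed event. The standalone model below (illustrative names, not the ring buffer code) shows why adding the per-event accounting in rb_advance_reader() makes the stat track trace_pipe reads.

/* Standalone model of the "bytes" accounting; not the ring buffer code.
 * bytes_unconsumed() mirrors the entries_bytes - read_bytes derivation.
 */
#include <stdio.h>

struct cpu_buffer_model {
        unsigned long entries_bytes;    /* bytes ever written */
        unsigned long read_bytes;       /* bytes ever consumed */
};

static unsigned long bytes_unconsumed(const struct cpu_buffer_model *cb)
{
        return cb->entries_bytes - cb->read_bytes;
}

static void write_event(struct cpu_buffer_model *cb, unsigned long len)
{
        cb->entries_bytes += len;
}

/* The fix: account the consumed event's length when advancing the reader. */
static void advance_reader(struct cpu_buffer_model *cb, unsigned long len)
{
        cb->read_bytes += len;
}

int main(void)
{
        struct cpu_buffer_model cb = { 0, 0 };

        write_event(&cb, 72);
        write_event(&cb, 496);
        advance_reader(&cb, 72);        /* one event consumed via trace_pipe */
        printf("bytes: %lu\n", bytes_unconsumed(&cb));  /* 496, not 568 */
        return 0;
}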
--- /dev/null
+From 164b4f554c2cb56f4185b1ee72c6ffa2777eae92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Mar 2023 15:24:46 +0100
+Subject: ring-buffer: remove obsolete comment for free_buffer_page()
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+[ Upstream commit a98151ad53b53f010ee364ec2fd06445b328578b ]
+
+The comment refers to mm/slob.c which is being removed. It comes from
+commit ed56829cb319 ("ring_buffer: reset buffer page when freeing") and
+according to Steven the borrowed code was a page mapcount and mapping
+reset, which was later removed by commit e4c2ce82ca27 ("ring_buffer:
+allocate buffer page pointer"). Thus the comment is not accurate anyway,
+remove it.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230315142446.27040-1-vbabka@suse.cz
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Ingo Molnar <mingo@elte.hu>
+Reported-by: Mike Rapoport <mike.rapoport@gmail.com>
+Suggested-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Fixes: e4c2ce82ca27 ("ring_buffer: allocate buffer page pointer")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Stable-dep-of: 45d99ea451d0 ("ring-buffer: Fix bytes info in per_cpu buffer stats")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 7d9af09bb0065..682540bd56355 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -355,10 +355,6 @@ static void rb_init_page(struct buffer_data_page *bpage)
+ local_set(&bpage->commit, 0);
+ }
+
+-/*
+- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
+- * this issue out.
+- */
+ static void free_buffer_page(struct buffer_page *bpage)
+ {
+ free_page((unsigned long)bpage->page);
+--
+2.40.1
+
ata-libata-core-do-not-register-pm-operations-for-sas-ports.patch
ata-libata-sata-increase-pmp-srst-timeout-to-10s.patch
fs-binfmt_elf_efpic-fix-personality-for-elf-fdpic.patch
+spi-spi-zynqmp-gqspi-fix-runtime-pm-imbalance-in-zyn.patch
+spi-zynqmp-gqspi-fix-clock-imbalance-on-probe-failur.patch
+nfs-cleanup-unused-rpc_clnt-variable.patch
+nfs-rename-nfs_client_kset-to-nfs_kset.patch
+nfsv4-fix-a-state-manager-thread-deadlock-regression.patch
+ring-buffer-remove-obsolete-comment-for-free_buffer_.patch
+ring-buffer-fix-bytes-info-in-per_cpu-buffer-stats.patch
--- /dev/null
+From 7559df4b474fc2018b46c41578cab48edb664eed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Apr 2021 15:46:44 +0800
+Subject: spi: spi-zynqmp-gqspi: Fix runtime PM imbalance in zynqmp_qspi_probe
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit a21fbc42807b15b74b0891bd557063e6acf4fcae ]
+
+When platform_get_irq() fails, a pairing PM usage counter
+increment is needed to keep the counter balanced. It's the
+same for the following error paths.
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Link: https://lore.kernel.org/r/20210408092559.3824-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 1527b076ae2c ("spi: zynqmp-gqspi: fix clock imbalance on probe failure")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-zynqmp-gqspi.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index 3d3ac48243ebd..ed68e237314fb 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -1147,11 +1147,16 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
++
++ ret = pm_runtime_get_sync(&pdev->dev);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
++ goto clk_dis_all;
++ }
++
+ /* QSPI controller initializations */
+ zynqmp_qspi_init_hw(xqspi);
+
+- pm_runtime_mark_last_busy(&pdev->dev);
+- pm_runtime_put_autosuspend(&pdev->dev);
+ xqspi->irq = platform_get_irq(pdev, 0);
+ if (xqspi->irq <= 0) {
+ ret = -ENXIO;
+@@ -1178,6 +1183,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = np;
++ ctlr->auto_runtime_pm = true;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+@@ -1185,9 +1191,13 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ goto clk_dis_all;
+ }
+
++ pm_runtime_mark_last_busy(&pdev->dev);
++ pm_runtime_put_autosuspend(&pdev->dev);
++
+ return 0;
+
+ clk_dis_all:
++ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(xqspi->refclk);
+--
+2.40.1
+
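The pairing rule this patch enforces can be summed up as: take a runtime-PM reference before touching the hardware, and drop it exactly once on every exit path. Below is a generic probe skeleton along those lines; it uses the same runtime-PM calls as the patch but is a made-up driver, not zynqmp-gqspi, and the error-path ordering shown here is revisited by the next patch in this series.

/* Generic sketch of a balanced runtime-PM probe path (illustrative only). */
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = pm_runtime_get_sync(&pdev->dev);          /* hold a reference... */
        if (ret < 0)
                goto err_pm;

        /* ...while the hardware is initialised and the IRQ is requested... */

        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);         /* ...then drop it */
        return 0;

err_pm:
        /* Error-path ordering as in this patch; the follow-up patch reorders it. */
        pm_runtime_put_sync(&pdev->dev);                /* pairs the get above */
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
}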
--- /dev/null
+From 103603e34ed68e660af114c6fd7b7482ad6c4eae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Jun 2023 10:24:35 +0200
+Subject: spi: zynqmp-gqspi: fix clock imbalance on probe failure
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 1527b076ae2cb6a9c590a02725ed39399fcad1cf ]
+
+Make sure that the device is not runtime suspended before explicitly
+disabling the clocks on probe failure and on driver unbind to avoid a
+clock enable-count imbalance.
+
+Fixes: 9e3a000362ae ("spi: zynqmp: Add pm runtime support")
+Cc: stable@vger.kernel.org # 4.19
+Cc: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+Cc: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20230622082435.7873-1-johan+linaro@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-zynqmp-gqspi.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index ed68e237314fb..12d9c5d6b9e26 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -1197,9 +1197,9 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ return 0;
+
+ clk_dis_all:
+- pm_runtime_put_sync(&pdev->dev);
+- pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
++ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(xqspi->refclk);
+ clk_dis_pclk:
+ clk_disable_unprepare(xqspi->pclk);
+@@ -1223,11 +1223,15 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
+ {
+ struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
+
++ pm_runtime_get_sync(&pdev->dev);
++
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
++
++ pm_runtime_disable(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
++ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(xqspi->refclk);
+ clk_disable_unprepare(xqspi->pclk);
+- pm_runtime_set_suspended(&pdev->dev);
+- pm_runtime_disable(&pdev->dev);
+
+ return 0;
+ }
+--
+2.40.1
+
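Why the resume matters: with runtime PM in use, the driver's runtime-suspend callback has typically already gated these clocks, so tearing down while suspended would disable clocks that are already off and underflow their enable counts. The skeleton below mirrors the remove() ordering the patch establishes, but with hypothetical driver names rather than the zynqmp-gqspi code.

/* Generic sketch of the unbind ordering introduced above (illustrative only). */
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

struct example_priv {
        struct clk *refclk;
        struct clk *pclk;
};

static int example_remove(struct platform_device *pdev)
{
        struct example_priv *priv = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);        /* ensure the device is resumed */

        /* quiesce the hardware here */

        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);      /* drop the reference taken above */
        pm_runtime_set_suspended(&pdev->dev);

        clk_disable_unprepare(priv->refclk);    /* now safe: enabled exactly once */
        clk_disable_unprepare(priv->pclk);

        return 0;
}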