--- /dev/null
+From 2e7b50347c4aa8d4338c62a5eb315028b4578572 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Jun 2023 15:15:47 +0800
+Subject: ceph: pass the mdsc to several helpers
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 197b7d792d6aead2e30d4b2c054ffabae2ed73dc ]
+
+We will use the 'mdsc' to get the global_id in the following commits.
+
+Link: https://tracker.ceph.com/issues/61590
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
+Reviewed-by: Milind Changire <mchangir@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: b372e96bd0a3 ("ceph: redirty page before returning AOP_WRITEPAGE_ACTIVATE")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 15 ++++++++------
+ fs/ceph/debugfs.c | 4 ++--
+ fs/ceph/dir.c | 2 +-
+ fs/ceph/file.c | 2 +-
+ fs/ceph/mds_client.c | 39 +++++++++++++++++++++----------------
+ fs/ceph/mds_client.h | 3 ++-
+ fs/ceph/mdsmap.c | 3 ++-
+ fs/ceph/snap.c | 16 +++++++++------
+ fs/ceph/super.h | 3 ++-
+ include/linux/ceph/mdsmap.h | 5 ++++-
+ 10 files changed, 55 insertions(+), 37 deletions(-)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index fc9f8f1a9036d..02f93437be353 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1178,7 +1178,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+ }
+ }
+
+-void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
++void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
++ bool queue_release)
+ {
+ struct ceph_inode_info *ci = cap->ci;
+ struct ceph_fs_client *fsc;
+@@ -1342,6 +1343,8 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
+ */
+ void __ceph_remove_caps(struct ceph_inode_info *ci)
+ {
++ struct inode *inode = &ci->netfs.inode;
++ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct rb_node *p;
+
+ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+@@ -1351,7 +1354,7 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+ p = rb_next(p);
+- ceph_remove_cap(cap, true);
++ ceph_remove_cap(mdsc, cap, true);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+ }
+@@ -4000,7 +4003,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ goto out_unlock;
+
+ if (target < 0) {
+- ceph_remove_cap(cap, false);
++ ceph_remove_cap(mdsc, cap, false);
+ goto out_unlock;
+ }
+
+@@ -4035,7 +4038,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ change_auth_cap_ses(ci, tcap->session);
+ }
+ }
+- ceph_remove_cap(cap, false);
++ ceph_remove_cap(mdsc, cap, false);
+ goto out_unlock;
+ } else if (tsession) {
+ /* add placeholder for the export tagert */
+@@ -4052,7 +4055,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ spin_unlock(&mdsc->cap_dirty_lock);
+ }
+
+- ceph_remove_cap(cap, false);
++ ceph_remove_cap(mdsc, cap, false);
+ goto out_unlock;
+ }
+
+@@ -4165,7 +4168,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
+ ocap->mseq, mds, le32_to_cpu(ph->seq),
+ le32_to_cpu(ph->mseq));
+ }
+- ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
++ ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
+ }
+
+ *old_issued = issued;
+diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
+index 3904333fa6c38..2f1e7498cd745 100644
+--- a/fs/ceph/debugfs.c
++++ b/fs/ceph/debugfs.c
+@@ -81,7 +81,7 @@ static int mdsc_show(struct seq_file *s, void *p)
+ if (req->r_inode) {
+ seq_printf(s, " #%llx", ceph_ino(req->r_inode));
+ } else if (req->r_dentry) {
+- path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
++ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
+ &pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
+@@ -100,7 +100,7 @@ static int mdsc_show(struct seq_file *s, void *p)
+ }
+
+ if (req->r_old_dentry) {
+- path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
++ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
+ &pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 854cbdd666619..fff5cb2df9a89 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -1226,7 +1226,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
+ if (result) {
+ int pathlen = 0;
+ u64 base = 0;
+- char *path = ceph_mdsc_build_path(dentry, &pathlen,
++ char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
+ &base, 0);
+
+ /* mark error on parent + clear complete */
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index bdd0a3b894b7b..472e86454488d 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -574,7 +574,7 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
+ if (result) {
+ int pathlen = 0;
+ u64 base = 0;
+- char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
++ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
+ &base, 0);
+
+ pr_warn("async create failure path=(%llx)%s result=%d!\n",
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 6d76fd0f704a6..750dfe512aca0 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -2126,6 +2126,7 @@ static bool drop_negative_children(struct dentry *dentry)
+ */
+ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
+ {
++ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ int *remaining = arg;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int used, wanted, oissued, mine;
+@@ -2173,7 +2174,7 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
+
+ if (oissued) {
+ /* we aren't the only cap.. just remove us */
+- ceph_remove_cap(cap, true);
++ ceph_remove_cap(mdsc, cap, true);
+ (*remaining)--;
+ } else {
+ struct dentry *dentry;
+@@ -2588,6 +2589,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+
+ /**
+ * ceph_mdsc_build_path - build a path string to a given dentry
++ * @mdsc: mds client
+ * @dentry: dentry to which path should be built
+ * @plen: returned length of string
+ * @pbase: returned base inode number
+@@ -2607,8 +2609,8 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+ * Encode hidden .snap dirs as a double /, i.e.
+ * foo/.snap/bar -> foo//bar
+ */
+-char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
+- int for_wire)
++char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
++ int *plen, u64 *pbase, int for_wire)
+ {
+ struct dentry *cur;
+ struct inode *inode;
+@@ -2726,9 +2728,9 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
+ return path + pos;
+ }
+
+-static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+- const char **ppath, int *ppathlen, u64 *pino,
+- bool *pfreepath, bool parent_locked)
++static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
++ struct inode *dir, const char **ppath, int *ppathlen,
++ u64 *pino, bool *pfreepath, bool parent_locked)
+ {
+ char *path;
+
+@@ -2744,7 +2746,7 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
+ return 0;
+ }
+ rcu_read_unlock();
+- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
++ path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+@@ -2756,6 +2758,7 @@ static int build_inode_path(struct inode *inode,
+ const char **ppath, int *ppathlen, u64 *pino,
+ bool *pfreepath)
+ {
++ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ struct dentry *dentry;
+ char *path;
+
+@@ -2765,7 +2768,7 @@ static int build_inode_path(struct inode *inode,
+ return 0;
+ }
+ dentry = d_find_alias(inode);
+- path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
++ path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ dput(dentry);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -2778,10 +2781,11 @@ static int build_inode_path(struct inode *inode,
+ * request arguments may be specified via an inode *, a dentry *, or
+ * an explicit ino+path.
+ */
+-static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+- struct inode *rdiri, const char *rpath,
+- u64 rino, const char **ppath, int *pathlen,
+- u64 *ino, bool *freepath, bool parent_locked)
++static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
++ struct dentry *rdentry, struct inode *rdiri,
++ const char *rpath, u64 rino, const char **ppath,
++ int *pathlen, u64 *ino, bool *freepath,
++ bool parent_locked)
+ {
+ int r = 0;
+
+@@ -2790,7 +2794,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+ dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+ ceph_snap(rinode));
+ } else if (rdentry) {
+- r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
++ r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
+ freepath, parent_locked);
+ dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
+ *ppath);
+@@ -2877,7 +2881,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ &session->s_features);
+
+- ret = set_request_path_attr(req->r_inode, req->r_dentry,
++ ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+ &path1, &pathlen1, &ino1, &freepath1,
+ test_bit(CEPH_MDS_R_PARENT_LOCKED,
+@@ -2891,7 +2895,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
+ if (req->r_old_dentry &&
+ !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
+ old_dentry = req->r_old_dentry;
+- ret = set_request_path_attr(NULL, old_dentry,
++ ret = set_request_path_attr(mdsc, NULL, old_dentry,
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+ &path2, &pathlen2, &ino2, &freepath2, true);
+@@ -4290,6 +4294,7 @@ static struct dentry* d_find_primary(struct inode *inode)
+ */
+ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ {
++ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+ union {
+ struct ceph_mds_cap_reconnect v2;
+ struct ceph_mds_cap_reconnect_v1 v1;
+@@ -4307,7 +4312,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ dentry = d_find_primary(inode);
+ if (dentry) {
+ /* set pathbase to parent dir when msg_version >= 2 */
+- path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
++ path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
+ recon_state->msg_version >= 2);
+ dput(dentry);
+ if (IS_ERR(path)) {
+@@ -5662,7 +5667,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+ return;
+ }
+
+- newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
++ newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
+ if (IS_ERR(newmap)) {
+ err = PTR_ERR(newmap);
+ goto bad_unlock;
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 5a3714bdd64a8..d930eb79dc380 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -581,7 +581,8 @@ static inline void ceph_mdsc_free_path(char *path, int len)
+ __putname(path - (PATH_MAX - 1 - len));
+ }
+
+-extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
++extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
++ struct dentry *dentry, int *plen, u64 *base,
+ int for_wire);
+
+ extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
+diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
+index 3bb3b610d403e..66afb18df76b2 100644
+--- a/fs/ceph/mdsmap.c
++++ b/fs/ceph/mdsmap.c
+@@ -114,7 +114,8 @@ static int __decode_and_drop_compat_set(void **p, void* end)
+ * Ignore any fields we don't care about (there are quite a few of
+ * them).
+ */
+-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
++struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
++ void *end, bool msgr2)
+ {
+ struct ceph_mdsmap *m;
+ const void *start = *p;
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 813f21add992c..55090e6c99672 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -329,7 +329,8 @@ static int cmpu64_rev(const void *a, const void *b)
+ /*
+ * build the snap context for a given realm.
+ */
+-static int build_snap_context(struct ceph_snap_realm *realm,
++static int build_snap_context(struct ceph_mds_client *mdsc,
++ struct ceph_snap_realm *realm,
+ struct list_head *realm_queue,
+ struct list_head *dirty_realms)
+ {
+@@ -425,7 +426,8 @@ static int build_snap_context(struct ceph_snap_realm *realm,
+ /*
+ * rebuild snap context for the given realm and all of its children.
+ */
+-static void rebuild_snap_realms(struct ceph_snap_realm *realm,
++static void rebuild_snap_realms(struct ceph_mds_client *mdsc,
++ struct ceph_snap_realm *realm,
+ struct list_head *dirty_realms)
+ {
+ LIST_HEAD(realm_queue);
+@@ -451,7 +453,8 @@ static void rebuild_snap_realms(struct ceph_snap_realm *realm,
+ continue;
+ }
+
+- last = build_snap_context(_realm, &realm_queue, dirty_realms);
++ last = build_snap_context(mdsc, _realm, &realm_queue,
++ dirty_realms);
+ dout("%s %llx %p, %s\n", __func__, _realm->ino, _realm,
+ last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
+
+@@ -708,7 +711,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
+ * Queue cap_snaps for snap writeback for this realm and its children.
+ * Called under snap_rwsem, so realm topology won't change.
+ */
+-static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
++static void queue_realm_cap_snaps(struct ceph_mds_client *mdsc,
++ struct ceph_snap_realm *realm)
+ {
+ struct ceph_inode_info *ci;
+ struct inode *lastinode = NULL;
+@@ -855,7 +859,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+
+ /* rebuild_snapcs when we reach the _end_ (root) of the trace */
+ if (realm_to_rebuild && p >= e)
+- rebuild_snap_realms(realm_to_rebuild, &dirty_realms);
++ rebuild_snap_realms(mdsc, realm_to_rebuild, &dirty_realms);
+
+ if (!first_realm)
+ first_realm = realm;
+@@ -873,7 +877,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+ realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
+ dirty_item);
+ list_del_init(&realm->dirty_item);
+- queue_realm_cap_snaps(realm);
++ queue_realm_cap_snaps(mdsc, realm);
+ }
+
+ if (realm_ret)
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 51c7f2b14f6f8..09c262dd5bd36 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -1223,7 +1223,8 @@ extern void ceph_add_cap(struct inode *inode,
+ unsigned cap, unsigned seq, u64 realmino, int flags,
+ struct ceph_cap **new_cap);
+ extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+-extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
++extern void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
++ bool queue_release);
+ extern void __ceph_remove_caps(struct ceph_inode_info *ci);
+ extern void ceph_put_cap(struct ceph_mds_client *mdsc,
+ struct ceph_cap *cap);
+diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
+index fcc95bff72a57..1f2171dd01bfa 100644
+--- a/include/linux/ceph/mdsmap.h
++++ b/include/linux/ceph/mdsmap.h
+@@ -5,6 +5,8 @@
+ #include <linux/bug.h>
+ #include <linux/ceph/types.h>
+
++struct ceph_mds_client;
++
+ /*
+ * mds map - describe servers in the mds cluster.
+ *
+@@ -69,7 +71,8 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
+ }
+
+ extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
+-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
++struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
++ void *end, bool msgr2);
+ extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+ extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
+
+--
+2.43.0
+
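The patch above only threads a 'struct ceph_mds_client *' through ceph_remove_cap(), ceph_mdsc_build_path(), ceph_mdsmap_decode() and the snap helpers; callers that start from an inode derive the pointer from the superblock, as the trim_caps_cb() and reconnect_caps_cb() hunks show. Below is a minimal sketch of the resulting calling convention, assuming the usual fs/ceph headers; the wrapper function itself is hypothetical, only the helpers it calls are touched by the patch.

/*
 * Sketch only, not part of the patch: drop one cap given just an inode,
 * supplying the mdsc explicitly as the new signature requires.
 */
static void example_drop_cap(struct inode *inode, struct ceph_cap *cap)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_inode_info *ci = ceph_inode(inode);

        spin_lock(&ci->i_ceph_lock);            /* ceph_remove_cap() asserts this lock is held */
        ceph_remove_cap(mdsc, cap, true);       /* was: ceph_remove_cap(cap, true) */
        spin_unlock(&ci->i_ceph_lock);
}
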
--- /dev/null
+From ce0001a2c072c010ee98330303bb45b8982a1500 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Mar 2024 09:21:20 +1100
+Subject: ceph: redirty page before returning AOP_WRITEPAGE_ACTIVATE
+
+From: NeilBrown <neilb@suse.de>
+
+[ Upstream commit b372e96bd0a32729d55d27f613c8bc80708a82e1 ]
+
+The page has been marked clean before writepage is called. If we don't
+redirty it before postponing the write, it might never get written.
+
+Cc: stable@vger.kernel.org
+Fixes: 503d4fa6ee28 ("ceph: remove reliance on bdi congestion")
+Signed-off-by: NeilBrown <neilb@suse.de>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: Xiubo Li <xiubli@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/addr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 28fa05a9d4d2f..da64bb7325dbc 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -803,8 +803,10 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
+ ihold(inode);
+
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+- ceph_inode_to_fs_client(inode)->write_congested)
++ ceph_inode_to_fs_client(inode)->write_congested) {
++ redirty_page_for_writepage(wbc, page);
+ return AOP_WRITEPAGE_ACTIVATE;
++ }
+
+ wait_on_page_fscache(page);
+
+--
+2.43.0
+
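The one-hunk fix above follows the general rule for ->writepage() implementations: the VM clears the page's dirty bit before calling ->writepage(), so a handler that declines to write under WB_SYNC_NONE must redirty the page before returning AOP_WRITEPAGE_ACTIVATE, otherwise the data may never reach disk. A sketch of that pattern follows; the example_* names are hypothetical, while redirty_page_for_writepage(), unlock_page() and AOP_WRITEPAGE_ACTIVATE are the core mm interfaces involved.

/*
 * Sketch only, not ceph code: the shape of a ->writepage() handler that
 * postpones a write without losing the dirty state.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
        if (wbc->sync_mode == WB_SYNC_NONE && example_is_congested()) {
                redirty_page_for_writepage(wbc, page);  /* keep the data dirty */
                return AOP_WRITEPAGE_ACTIVATE;          /* page left locked; caller unlocks it */
        }

        /* ... submit the real write-out here ... */
        unlock_page(page);
        return 0;
}
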
--- /dev/null
+From a3ce3dc5d35057addbc7047b5f8c1fad8ff8b050 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 10:50:38 +0800
+Subject: ceph: rename _to_client() to _to_fs_client()
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 5995d90d2d19f337df6a50bcf4699ef053214dac ]
+
+We need to convert the inode to a ceph_client in a following commit
+and will add one new helper for that, so here we rename the old
+helpers to _to_fs_client().
+
+Link: https://tracker.ceph.com/issues/61590
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
+Reviewed-by: Milind Changire <mchangir@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: b372e96bd0a3 ("ceph: redirty page before returning AOP_WRITEPAGE_ACTIVATE")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/addr.c | 20 ++++++++++----------
+ fs/ceph/cache.c | 2 +-
+ fs/ceph/caps.c | 40 ++++++++++++++++++++--------------------
+ fs/ceph/crypto.c | 2 +-
+ fs/ceph/dir.c | 22 +++++++++++-----------
+ fs/ceph/export.c | 10 +++++-----
+ fs/ceph/file.c | 24 ++++++++++++------------
+ fs/ceph/inode.c | 14 +++++++-------
+ fs/ceph/ioctl.c | 8 ++++----
+ fs/ceph/mds_client.c | 2 +-
+ fs/ceph/snap.c | 2 +-
+ fs/ceph/super.c | 22 +++++++++++-----------
+ fs/ceph/super.h | 10 +++++-----
+ fs/ceph/xattr.c | 12 ++++++------
+ 14 files changed, 95 insertions(+), 95 deletions(-)
+
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index f4863078f7fe5..28fa05a9d4d2f 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -229,7 +229,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
+ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
+ {
+ struct inode *inode = subreq->rreq->inode;
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 objno, objoff;
+ u32 xlen;
+@@ -244,7 +244,7 @@ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
+ static void finish_netfs_read(struct ceph_osd_request *req)
+ {
+ struct inode *inode = req->r_inode;
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
+ struct netfs_io_subrequest *subreq = req->r_priv;
+ struct ceph_osd_req_op *op = &req->r_ops[0];
+@@ -348,7 +348,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct inode *inode = rreq->inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_request *req = NULL;
+ struct ceph_vino vino = ceph_vino(inode);
+ struct iov_iter iter;
+@@ -658,7 +658,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+ struct folio *folio = page_folio(page);
+ struct inode *inode = page->mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_snap_context *snapc, *oldest;
+ loff_t page_off = page_offset(page);
+ int err;
+@@ -803,7 +803,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
+ ihold(inode);
+
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+- ceph_inode_to_client(inode)->write_congested)
++ ceph_inode_to_fs_client(inode)->write_congested)
+ return AOP_WRITEPAGE_ACTIVATE;
+
+ wait_on_page_fscache(page);
+@@ -836,7 +836,7 @@ static void writepages_finish(struct ceph_osd_request *req)
+ int rc = req->r_result;
+ struct ceph_snap_context *snapc = req->r_snapc;
+ struct address_space *mapping = inode->i_mapping;
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ unsigned int len = 0;
+ bool remove_page;
+
+@@ -926,7 +926,7 @@ static int ceph_writepages_start(struct address_space *mapping,
+ {
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_vino vino = ceph_vino(inode);
+ pgoff_t index, start_index, end = -1;
+ struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
+@@ -1823,7 +1823,7 @@ int ceph_uninline_data(struct file *file)
+ {
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_request *req = NULL;
+ struct ceph_cap_flush *prealloc_cf = NULL;
+ struct folio *folio = NULL;
+@@ -1977,7 +1977,7 @@ enum {
+ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
+ s64 pool, struct ceph_string *pool_ns)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
+ struct rb_node **p, *parent;
+@@ -2168,7 +2168,7 @@ int ceph_pool_perm_check(struct inode *inode, int need)
+ return 0;
+ }
+
+- if (ceph_test_mount_opt(ceph_inode_to_client(inode),
++ if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
+ NOPOOLPERM))
+ return 0;
+
+diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
+index de1dee46d3df7..930fbd54d2c8c 100644
+--- a/fs/ceph/cache.c
++++ b/fs/ceph/cache.c
+@@ -15,7 +15,7 @@
+ void ceph_fscache_register_inode_cookie(struct inode *inode)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+
+ /* No caching for filesystem? */
+ if (!fsc->fscache)
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 02f93437be353..00045b8eadd14 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -635,7 +635,7 @@ void ceph_add_cap(struct inode *inode,
+ unsigned seq, unsigned mseq, u64 realmino, int flags,
+ struct ceph_cap **new_cap)
+ {
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap *cap;
+ int mds = session->s_mds;
+@@ -922,7 +922,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
+ int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
+ int touch)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ int r;
+
+ r = __ceph_caps_issued_mask(ci, mask, touch);
+@@ -996,7 +996,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
+ const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
+ const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
+ struct ceph_mount_options *opt =
+- ceph_inode_to_client(&ci->netfs.inode)->mount_options;
++ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
+ unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
+ unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;
+
+@@ -1121,7 +1121,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+
+ dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode);
+
+- mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc;
++ mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc;
+
+ /* remove from inode's cap rbtree, and clear auth cap */
+ rb_erase(&cap->ci_node, &ci->i_caps);
+@@ -1192,7 +1192,7 @@ void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+- fsc = ceph_inode_to_client(&ci->netfs.inode);
++ fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
+ WARN_ON_ONCE(ci->i_auth_cap == cap &&
+ !list_empty(&ci->i_dirty_item) &&
+ !fsc->blocklisted &&
+@@ -1344,7 +1344,7 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
+ void __ceph_remove_caps(struct ceph_inode_info *ci)
+ {
+ struct inode *inode = &ci->netfs.inode;
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct rb_node *p;
+
+ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+@@ -1689,7 +1689,7 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession)
+ {
+ struct inode *inode = &ci->netfs.inode;
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_mds_session *session = NULL;
+ bool need_put = false;
+ int mds;
+@@ -1754,7 +1754,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
+ struct ceph_cap_flush **pcf)
+ {
+ struct ceph_mds_client *mdsc =
+- ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc;
++ ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc;
+ struct inode *inode = &ci->netfs.inode;
+ int was = ci->i_dirty_caps;
+ int dirty = 0;
+@@ -1877,7 +1877,7 @@ static u64 __mark_caps_flushing(struct inode *inode,
+ struct ceph_mds_session *session, bool wake,
+ u64 *oldest_flush_tid)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap_flush *cf = NULL;
+ int flushing;
+@@ -2236,7 +2236,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags)
+ */
+ static int try_flush_caps(struct inode *inode, u64 *ptid)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int flushing = 0;
+ u64 flush_tid = 0, oldest_flush_tid = 0;
+@@ -2314,7 +2314,7 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
+ */
+ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req1 = NULL, *req2 = NULL;
+ int ret, err = 0;
+@@ -2497,7 +2497,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
+ caps_are_flushed(inode, flush_tid));
+ } else {
+ struct ceph_mds_client *mdsc =
+- ceph_sb_to_client(inode->i_sb)->mdsc;
++ ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+
+ spin_lock(&ci->i_ceph_lock);
+ if (__ceph_caps_dirty(ci))
+@@ -2750,7 +2750,7 @@ static int try_get_cap_refs(struct inode *inode, int need, int want,
+ loff_t endoff, int flags, int *got)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ int ret = 0;
+ int have, implemented;
+ bool snap_rwsem_locked = false;
+@@ -2968,7 +2968,7 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
+ int want, loff_t endoff, int *got)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int ret, _got, flags;
+
+ ret = ceph_pool_perm_check(inode, need);
+@@ -3731,7 +3731,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
+ __releases(ci->i_ceph_lock)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *cf, *tmp_cf;
+ LIST_HEAD(to_remove);
+ unsigned seq = le32_to_cpu(m->seq);
+@@ -3837,7 +3837,7 @@ void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
+ bool *wake_ci, bool *wake_mdsc)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ bool ret;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+@@ -3881,7 +3881,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
+ struct ceph_mds_session *session)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ u64 follows = le64_to_cpu(m->snap_follows);
+ struct ceph_cap_snap *capsnap = NULL, *iter;
+ bool wake_ci = false;
+@@ -3973,7 +3973,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ struct ceph_mds_cap_peer *ph,
+ struct ceph_mds_session *session)
+ {
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_mds_session *tsession = NULL;
+ struct ceph_cap *cap, *tcap, *new_cap = NULL;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+@@ -4676,7 +4676,7 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
+
+ if (__ceph_caps_dirty(ci)) {
+ struct ceph_mds_client *mdsc =
+- ceph_inode_to_client(inode)->mdsc;
++ ceph_inode_to_fs_client(inode)->mdsc;
+ __cap_delay_requeue_front(mdsc, ci);
+ }
+ }
+@@ -4856,7 +4856,7 @@ static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
+
+ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_auth;
+diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
+index 5b5112c784629..08c3856107316 100644
+--- a/fs/ceph/crypto.c
++++ b/fs/ceph/crypto.c
+@@ -129,7 +129,7 @@ static bool ceph_crypt_empty_dir(struct inode *inode)
+
+ static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
+ {
+- return ceph_sb_to_client(sb)->fsc_dummy_enc_policy.policy;
++ return ceph_sb_to_fs_client(sb)->fsc_dummy_enc_policy.policy;
+ }
+
+ static struct fscrypt_operations ceph_fscrypt_ops = {
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index fff5cb2df9a89..1395b71df5ccc 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -310,7 +310,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
+ struct ceph_dir_file_info *dfi = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ int i;
+ int err;
+@@ -703,7 +703,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
+ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
+ struct dentry *dentry)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */
+
+ /* .snap dir? */
+@@ -771,7 +771,7 @@ static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
+ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+ struct ceph_mds_request *req;
+ int op;
+@@ -1199,7 +1199,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+ {
+ struct dentry *dentry = req->r_dentry;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ int result = req->r_err ? req->r_err :
+ le32_to_cpu(req->r_reply_info.head->result);
+@@ -1290,7 +1290,7 @@ static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
+ */
+ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct inode *inode = d_inode(dentry);
+ struct ceph_mds_request *req;
+@@ -1469,7 +1469,7 @@ void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
+ return;
+ }
+
+- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ list_move_tail(&di->lease_list, &mdsc->dentry_leases);
+ spin_unlock(&mdsc->dentry_list_lock);
+@@ -1516,7 +1516,7 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
+ return;
+ }
+
+- mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ __dentry_dir_lease_touch(mdsc, di),
+ spin_unlock(&mdsc->dentry_list_lock);
+@@ -1530,7 +1530,7 @@ static void __dentry_lease_unlist(struct ceph_dentry_info *di)
+ if (list_empty(&di->lease_list))
+ return;
+
+- mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(di->dentry->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_list_lock);
+ list_del_init(&di->lease_list);
+ spin_unlock(&mdsc->dentry_list_lock);
+@@ -1888,7 +1888,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
+ dentry, inode, ceph_dentry(dentry)->offset,
+ !!(dentry->d_flags & DCACHE_NOKEY_NAME));
+
+- mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
++ mdsc = ceph_sb_to_fs_client(dir->i_sb)->mdsc;
+
+ /* always trust cached snapped dentries, snapdir dentry */
+ if (ceph_snap(dir) != CEPH_NOSNAP) {
+@@ -1995,7 +1995,7 @@ static int ceph_d_delete(const struct dentry *dentry)
+ static void ceph_d_release(struct dentry *dentry)
+ {
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+
+ dout("d_release %p\n", dentry);
+
+@@ -2064,7 +2064,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
+ int left;
+ const int bufsize = 1024;
+
+- if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
++ if (!ceph_test_mount_opt(ceph_sb_to_fs_client(inode->i_sb), DIRSTAT))
+ return -EISDIR;
+
+ if (!dfi->dir_info) {
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index 8559990a59a5c..52c4daf2447d3 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -123,7 +123,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+
+ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct inode *inode;
+ struct ceph_vino vino;
+ int err;
+@@ -205,7 +205,7 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
+ struct ceph_nfs_snapfh *sfh,
+ bool want_parent)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct inode *inode;
+ struct ceph_vino vino;
+@@ -317,7 +317,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
+ static struct dentry *__get_parent(struct super_block *sb,
+ struct dentry *child, u64 ino)
+ {
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct inode *inode;
+ int mask;
+@@ -439,7 +439,7 @@ static int __get_snap_name(struct dentry *parent, char *name,
+ {
+ struct inode *inode = d_inode(child);
+ struct inode *dir = d_inode(parent);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_request *req = NULL;
+ char *last_name = NULL;
+ unsigned next_offset = 2;
+@@ -544,7 +544,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return __get_snap_name(parent, name, child);
+
+- mdsc = ceph_inode_to_client(inode)->mdsc;
++ mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
+ USE_ANY_MDS);
+ if (IS_ERR(req))
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 472e86454488d..1e0497295662a 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -200,7 +200,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mount_options *opt =
+- ceph_inode_to_client(&ci->netfs.inode)->mount_options;
++ ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
+ struct ceph_file_info *fi;
+ int ret;
+
+@@ -234,7 +234,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
+
+ spin_lock_init(&fi->rw_contexts_lock);
+ INIT_LIST_HEAD(&fi->rw_contexts);
+- fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
++ fi->filp_gen = READ_ONCE(ceph_inode_to_fs_client(inode)->filp_gen);
+
+ if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
+ ret = ceph_uninline_data(file);
+@@ -352,7 +352,7 @@ int ceph_renew_caps(struct inode *inode, int fmode)
+ int ceph_open(struct inode *inode, struct file *file)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_file_info *fi = file->private_data;
+@@ -730,7 +730,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
+ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned flags, umode_t mode)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ struct inode *new_inode = NULL;
+@@ -962,7 +962,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ u64 *last_objver)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ ssize_t ret;
+ u64 off = *ki_pos;
+@@ -1259,7 +1259,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
+ if (aio_work) {
+ INIT_WORK(&aio_work->work, ceph_aio_retry_work);
+ aio_work->req = req;
+- queue_work(ceph_inode_to_client(inode)->inode_wq,
++ queue_work(ceph_inode_to_fs_client(inode)->inode_wq,
+ &aio_work->work);
+ return;
+ }
+@@ -1389,7 +1389,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client_metric *metric = &fsc->mdsc->metric;
+ struct ceph_vino vino;
+ struct ceph_osd_request *req;
+@@ -1613,7 +1613,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ struct ceph_osd_request *req;
+ struct page **pages;
+@@ -2231,7 +2231,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ struct ceph_file_info *fi = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ struct ceph_cap_flush *prealloc_cf;
+ ssize_t count, written = 0;
+@@ -2465,7 +2465,7 @@ static int ceph_zero_partial_object(struct inode *inode,
+ loff_t offset, loff_t *length)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_osd_request *req;
+ int ret = 0;
+ loff_t zero = 0;
+@@ -2848,7 +2848,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ struct ceph_inode_info *src_ci = ceph_inode(src_inode);
+ struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
+ struct ceph_cap_flush *prealloc_cf;
+- struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
++ struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode);
+ loff_t size;
+ ssize_t ret = -EIO, bytes;
+ u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
+@@ -2856,7 +2856,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
+ int src_got = 0, dst_got = 0, err, dirty;
+
+ if (src_inode->i_sb != dst_inode->i_sb) {
+- struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
++ struct ceph_fs_client *dst_fsc = ceph_inode_to_fs_client(dst_inode);
+
+ if (ceph_fsid_compare(&src_fsc->client->fsid,
+ &dst_fsc->client->fsid)) {
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index b79100f720b38..db6977c15c282 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -1489,7 +1489,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
+ struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct inode *in = NULL;
+ struct ceph_vino tvino, dvino;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ int err = 0;
+
+ dout("fill_trace %p is_dentry %d is_target %d\n", req,
+@@ -2079,7 +2079,7 @@ bool ceph_inode_set_size(struct inode *inode, loff_t size)
+
+ void ceph_queue_inode_work(struct inode *inode, int work_bit)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ set_bit(work_bit, &ci->i_work_mask);
+
+@@ -2427,7 +2427,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr,
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ unsigned int ia_valid = attr->ia_valid;
+ struct ceph_mds_request *req;
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *prealloc_cf;
+ loff_t isize = i_size_read(inode);
+ int issued;
+@@ -2740,7 +2740,7 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+ {
+ struct inode *inode = d_inode(dentry);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int err;
+
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+@@ -2810,7 +2810,7 @@ int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
+ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int mask, bool force)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ int mode;
+@@ -2856,7 +2856,7 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
+ int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
+ size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_mds_request *req;
+ int mode = USE_AUTH_MDS;
+@@ -3001,7 +3001,7 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
+ stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
+
+ if (S_ISDIR(inode->i_mode)) {
+- if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
++ if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
+ stat->size = ci->i_rbytes;
+ } else if (ceph_snap(inode) == CEPH_SNAPDIR) {
+ struct ceph_inode_info *pci;
+diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
+index 91a84917d203c..3f617146e4ad3 100644
+--- a/fs/ceph/ioctl.c
++++ b/fs/ceph/ioctl.c
+@@ -65,7 +65,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
+ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
+ {
+ struct inode *inode = file_inode(file);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_ioctl_layout l;
+ struct ceph_inode_info *ci = ceph_inode(file_inode(file));
+@@ -140,7 +140,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
+ struct ceph_mds_request *req;
+ struct ceph_ioctl_layout l;
+ int err;
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+
+ /* copy and validate */
+ if (copy_from_user(&l, arg, sizeof(l)))
+@@ -183,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_osd_client *osdc =
+- &ceph_sb_to_client(inode->i_sb)->client->osdc;
++ &ceph_sb_to_fs_client(inode->i_sb)->client->osdc;
+ struct ceph_object_locator oloc;
+ CEPH_DEFINE_OID_ONSTACK(oid);
+ u32 xlen;
+@@ -244,7 +244,7 @@ static long ceph_ioctl_lazyio(struct file *file)
+ struct ceph_file_info *fi = file->private_data;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+
+ if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
+ spin_lock(&ci->i_ceph_lock);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 750dfe512aca0..11289ce8a8cc8 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -830,7 +830,7 @@ static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
+ */
+ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+ struct dentry *pdentry = dentry->d_parent;
+ struct dentry *udentry, *found = NULL;
+ struct ceph_dentry_info *di;
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 55090e6c99672..d0d3612f28f0e 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -964,7 +964,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
+ void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+ struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 2d7f5a8d4a926..52af90beab000 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -44,7 +44,7 @@ static LIST_HEAD(ceph_fsc_list);
+ */
+ static void ceph_put_super(struct super_block *s)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
+
+ dout("put_super\n");
+ ceph_fscrypt_free_dummy_policy(fsc);
+@@ -53,7 +53,7 @@ static void ceph_put_super(struct super_block *s)
+
+ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+- struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(d_inode(dentry));
+ struct ceph_mon_client *monc = &fsc->client->monc;
+ struct ceph_statfs st;
+ int i, err;
+@@ -118,7 +118,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+
+ static int ceph_sync_fs(struct super_block *sb, int wait)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ if (!wait) {
+ dout("sync_fs (non-blocking)\n");
+@@ -684,7 +684,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
+ */
+ static int ceph_show_options(struct seq_file *m, struct dentry *root)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(root->d_sb);
+ struct ceph_mount_options *fsopt = fsc->mount_options;
+ size_t pos;
+ int ret;
+@@ -1015,7 +1015,7 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
+ */
+ void ceph_umount_begin(struct super_block *sb)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ dout("ceph_umount_begin - starting forced umount\n");
+ if (!fsc)
+@@ -1226,7 +1226,7 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
+ struct ceph_fs_client *new = fc->s_fs_info;
+ struct ceph_mount_options *fsopt = new->mount_options;
+ struct ceph_options *opt = new->client->options;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ dout("ceph_compare_super %p\n", sb);
+
+@@ -1322,9 +1322,9 @@ static int ceph_get_tree(struct fs_context *fc)
+ goto out;
+ }
+
+- if (ceph_sb_to_client(sb) != fsc) {
++ if (ceph_sb_to_fs_client(sb) != fsc) {
+ destroy_fs_client(fsc);
+- fsc = ceph_sb_to_client(sb);
++ fsc = ceph_sb_to_fs_client(sb);
+ dout("get_sb got existing client %p\n", fsc);
+ } else {
+ dout("get_sb using new client %p\n", fsc);
+@@ -1377,7 +1377,7 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+ struct super_block *sb = fc->root->d_sb;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+
+ err = ceph_apply_test_dummy_encryption(sb, fc, fsopt);
+ if (err)
+@@ -1516,7 +1516,7 @@ void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc)
+
+ static void ceph_kill_sb(struct super_block *s)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(s);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ bool wait;
+
+@@ -1578,7 +1578,7 @@ MODULE_ALIAS_FS("ceph");
+
+ int ceph_force_reconnect(struct super_block *sb)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+ int err = 0;
+
+ fsc->mount_state = CEPH_MOUNT_RECOVER;
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 09c262dd5bd36..8efd4ba607744 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -488,13 +488,13 @@ ceph_inode(const struct inode *inode)
+ }
+
+ static inline struct ceph_fs_client *
+-ceph_inode_to_client(const struct inode *inode)
++ceph_inode_to_fs_client(const struct inode *inode)
+ {
+ return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
+ }
+
+ static inline struct ceph_fs_client *
+-ceph_sb_to_client(const struct super_block *sb)
++ceph_sb_to_fs_client(const struct super_block *sb)
+ {
+ return (struct ceph_fs_client *)sb->s_fs_info;
+ }
+@@ -502,7 +502,7 @@ ceph_sb_to_client(const struct super_block *sb)
+ static inline struct ceph_mds_client *
+ ceph_sb_to_mdsc(const struct super_block *sb)
+ {
+- return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
++ return (struct ceph_mds_client *)ceph_sb_to_fs_client(sb)->mdsc;
+ }
+
+ static inline struct ceph_vino
+@@ -558,7 +558,7 @@ static inline u64 ceph_snap(struct inode *inode)
+ */
+ static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
+ {
+- if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
++ if (unlikely(ceph_test_mount_opt(ceph_sb_to_fs_client(sb), INO32)))
+ return ceph_ino_to_ino32(ino);
+ return ino;
+ }
+@@ -1106,7 +1106,7 @@ void ceph_inode_shutdown(struct inode *inode);
+ static inline bool ceph_inode_is_shutdown(struct inode *inode)
+ {
+ unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
+- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
++ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ int state = READ_ONCE(fsc->mount_state);
+
+ return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 0deae4a0f5f16..558f64554b591 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -57,7 +57,7 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
+ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+ size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ struct ceph_string *pool_ns;
+ s64 pool = ci->i_layout.pool_id;
+@@ -161,7 +161,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
+ char *val, size_t size)
+ {
+ ssize_t ret;
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
+ s64 pool = ci->i_layout.pool_id;
+ const char *pool_name;
+@@ -313,7 +313,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
+ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
+ char *val, size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+
+ return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
+ }
+@@ -321,7 +321,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
+ static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
+ char *val, size_t size)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+
+ return ceph_fmt_xattr(val, size, "client%lld",
+ ceph_client_gid(fsc->client));
+@@ -1094,7 +1094,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
+ static int ceph_sync_setxattr(struct inode *inode, const char *name,
+ const char *value, size_t size, int flags)
+ {
+- struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+@@ -1164,7 +1164,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
+ {
+ struct ceph_vxattr *vxattr;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
++ struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *prealloc_cf = NULL;
+ struct ceph_buffer *old_blob = NULL;
+ int issued;
+--
+2.43.0
+
--- /dev/null
+From b8235ef51b1ac6698489ee87cee505e38abe683e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Mar 2024 13:49:43 -0400
+Subject: drm/amd/display: Do not recursively call manual trigger programming
+
+From: Dillon Varone <dillon.varone@amd.com>
+
+[ Upstream commit 953927587f37b731abdeabe46ad44a3b3ec67a52 ]
+
+[WHY&HOW]
+We should not be recursively calling the manual trigger programming function when
+FAMS is not in use.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Alvin Lee <alvin.lee2@amd.com>
+Acked-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Dillon Varone <dillon.varone@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+index e817fa4efeee5..058dee76054ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+@@ -236,9 +236,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+-
+- // Setup manual flow control for EOF via TRIG_A
+- optc->funcs->setup_manual_trigger(optc);
+ }
+ }
+
+--
+2.43.0
+
--- /dev/null
+From a6881c944336282bcf0f266919832b36f4a145ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Sep 2023 16:04:33 +0300
+Subject: drm/i915: Adjust seamless_m_n flag behaviour
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 825edc8bc72f3266534a04e9a4447b12332fac82 ]
+
+Make the seamless_m_n flag more like the update_pipe fastset
+flag, i.e. the flag will only be set if we need to do the seamless
+M/N update, and in all other cases the flag is cleared. Also
+rename the flag to update_m_n to make it more clear it's similar
+to update_pipe.
+
+I believe special casing seamless_m_n like this makes sense
+as it also affects e.g. vblank evasion. We can potentially avoid
+some vblank evasion tricks, simplify some checks, and hopefully
+will help with the VRR vs. M/N mess.
+
+Cc: Manasi Navare <navaremanasi@chromium.org>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230901130440.2085-6-ville.syrjala@linux.intel.com
+Reviewed-by: Manasi Navare <navaremanasi@chromium.org>
+Stable-dep-of: 4a36e46df7aa ("drm/i915: Disable live M/N updates when using bigjoiner")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_atomic.c | 1 +
+ drivers/gpu/drm/i915/display/intel_crtc.c | 2 +-
+ drivers/gpu/drm/i915/display/intel_display.c | 22 +++++++++++--------
+ .../drm/i915/display/intel_display_types.h | 2 +-
+ drivers/gpu/drm/i915/display/intel_dp.c | 2 +-
+ 5 files changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
+index 7cf51dd8c0567..aaddd8c0cfa0e 100644
+--- a/drivers/gpu/drm/i915/display/intel_atomic.c
++++ b/drivers/gpu/drm/i915/display/intel_atomic.c
+@@ -259,6 +259,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
+ drm_property_blob_get(crtc_state->post_csc_lut);
+
+ crtc_state->update_pipe = false;
++ crtc_state->update_m_n = false;
+ crtc_state->disable_lp_wm = false;
+ crtc_state->disable_cxsr = false;
+ crtc_state->update_wm_pre = false;
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
+index 5c89eba8148c0..cfbfbfed3f5e6 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc.c
+@@ -510,7 +510,7 @@ static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
+ * M/N is double buffered on the transcoder's undelayed vblank,
+ * so with seamless M/N we must evade both vblanks.
+ */
+- if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
++ if (new_crtc_state->update_m_n)
+ *min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 39efd67cc3232..1a59fca40252c 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5215,7 +5215,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+
+ if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+- if (!fastset || !pipe_config->seamless_m_n)
++ if (!fastset || !pipe_config->update_m_n)
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+ } else {
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+@@ -5353,7 +5353,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ PIPE_CONF_CHECK_I(pipe_bpp);
+
+- if (!fastset || !pipe_config->seamless_m_n) {
++ if (!fastset || !pipe_config->update_m_n) {
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+ }
+@@ -5448,6 +5448,7 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
+
+ crtc_state->uapi.mode_changed = true;
+ crtc_state->update_pipe = false;
++ crtc_state->update_m_n = false;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+@@ -5565,13 +5566,14 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
+ {
+ struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);
+
+- if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
++ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
+ drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
++ else
++ new_crtc_state->uapi.mode_changed = false;
+
+- return;
+- }
++ if (intel_crtc_needs_modeset(new_crtc_state))
++ new_crtc_state->update_m_n = false;
+
+- new_crtc_state->uapi.mode_changed = false;
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ new_crtc_state->update_pipe = true;
+ }
+@@ -6297,6 +6299,7 @@ int intel_atomic_check(struct drm_device *dev,
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
++ new_crtc_state->update_m_n = false;
+ }
+ }
+
+@@ -6309,6 +6312,7 @@ int intel_atomic_check(struct drm_device *dev,
+ if (intel_cpu_transcoders_need_modeset(state, trans)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
++ new_crtc_state->update_m_n = false;
+ }
+ }
+
+@@ -6316,6 +6320,7 @@ int intel_atomic_check(struct drm_device *dev,
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
++ new_crtc_state->update_m_n = false;
+ }
+ }
+ }
+@@ -6494,7 +6499,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_set_linetime_wm(new_crtc_state);
+
+- if (new_crtc_state->seamless_m_n)
++ if (new_crtc_state->update_m_n)
+ intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+ &new_crtc_state->dp_m_n);
+ }
+@@ -6630,8 +6635,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ *
+ * FIXME Should be synchronized with the start of vblank somehow...
+ */
+- if (vrr_enabling(old_crtc_state, new_crtc_state) ||
+- (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)))
++ if (vrr_enabling(old_crtc_state, new_crtc_state) || new_crtc_state->update_m_n)
+ intel_crtc_update_active_timings(new_crtc_state,
+ new_crtc_state->vrr.enable);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 8b0dc2b75da4a..1c23b186aff20 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -1084,6 +1084,7 @@ struct intel_crtc_state {
+
+ unsigned fb_bits; /* framebuffers to flip */
+ bool update_pipe; /* can a fast modeset be performed? */
++ bool update_m_n; /* update M/N seamlessly during fastset? */
+ bool disable_cxsr;
+ bool update_wm_pre, update_wm_post; /* watermarks are updated */
+ bool fifo_changed; /* FIFO split is changed */
+@@ -1196,7 +1197,6 @@ struct intel_crtc_state {
+ /* m2_n2 for eDP downclock */
+ struct intel_link_m_n dp_m2_n2;
+ bool has_drrs;
+- bool seamless_m_n;
+
+ /* PSR is supported but might not be enabled due the lack of enabled planes */
+ bool has_psr;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index d712cb9b81e1e..7e135ed8e1d75 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2149,7 +2149,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
+ int pixel_clock;
+
+ if (has_seamless_m_n(connector))
+- pipe_config->seamless_m_n = true;
++ pipe_config->update_m_n = true;
+
+ if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
+ if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
+--
+2.43.0
+
--- /dev/null
+From c964396cda73ba328464682bd4511b1ce408630d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Apr 2024 18:50:04 +0300
+Subject: drm/i915/cdclk: Fix voltage_level programming edge case
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 6154cc9177ccea00c89ce0bf93352e474b819ff2 ]
+
+Currently we only consider the relationship of the
+old and new CDCLK frequencies when determining whether
+to do the reprogramming from intel_set_cdclk_pre_plane_update()
+or intel_set_cdclk_post_plane_update().
+
+It is technically possible to have a situation where the
+CDCLK frequency is decreasing, but the voltage_level is
+increasing due to a DDI port. In this case we should bump
+the voltage level already in intel_set_cdclk_pre_plane_update()
+(so that the voltage_level will have been increased by the
+time the port gets enabled), while leaving the CDCLK frequency
+unchanged (as active planes/etc. may still depend on it).
+We can then reduce the CDCLK frequency to its final value
+from intel_set_cdclk_post_plane_update().
+
+In order to handle that correctly we shall construct a
+suitable amalgam of the old and new cdclk states in
+intel_set_cdclk_pre_plane_update().
+
+And we can simply call intel_set_cdclk() unconditionally
+in both places as it will not do anything if nothing actually
+changes vs. the current hw state.
+
+v2: Handle cdclk_state->disable_pipes
+v3: Only synchronize the cd2x update against the pipe's vblank
+ when the cdclk frequency is changing during the current
+ commit phase (Gustavo)
+
+Cc: stable@vger.kernel.org
+Cc: Gustavo Sousa <gustavo.sousa@intel.com>
+Reviewed-by: Uma Shankar <uma.shankar@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240402155016.13733-3-ville.syrjala@linux.intel.com
+(cherry picked from commit 34d127e2bdef73a923aa0dcd95cbc3257ad5af52)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
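+
+As a rough, self-contained illustration of the pre-plane-update logic
+above (plain userspace C; the struct and field names are simplified
+stand-ins rather than the driver's real intel_cdclk_config handling,
+and the disable_pipes special case is left out):
+
+  #include <stdio.h>
+
+  /* simplified stand-in for a cdclk configuration */
+  struct cdclk_cfg {
+          int cdclk;              /* frequency in kHz */
+          int voltage_level;
+  };
+
+  static int max_int(int a, int b) { return a > b ? a : b; }
+
+  /*
+   * Pre-plane-update: never lower the frequency yet, but make sure the
+   * voltage level is already high enough for the new configuration.
+   */
+  static struct cdclk_cfg pre_update(struct cdclk_cfg old, struct cdclk_cfg new)
+  {
+          struct cdclk_cfg cfg = (new.cdclk >= old.cdclk) ? new : old;
+
+          cfg.voltage_level = max_int(old.voltage_level, new.voltage_level);
+          return cfg;
+  }
+
+  int main(void)
+  {
+          /* frequency goes down while a port needs a higher voltage level */
+          struct cdclk_cfg old = { .cdclk = 652800, .voltage_level = 0 };
+          struct cdclk_cfg new = { .cdclk = 480000, .voltage_level = 2 };
+          struct cdclk_cfg pre = pre_update(old, new);
+
+          printf("pre-plane-update:  cdclk=%d voltage_level=%d\n",
+                 pre.cdclk, pre.voltage_level);
+          /* post-plane-update then simply programs the final state */
+          printf("post-plane-update: cdclk=%d voltage_level=%d\n",
+                 new.cdclk, new.voltage_level);
+          return 0;
+  }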
+---
+ drivers/gpu/drm/i915/display/intel_cdclk.c | 37 ++++++++++++++++------
+ 1 file changed, 27 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index 5aa6b998a1cb1..fc3a6eb1de741 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2453,7 +2453,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+- enum pipe pipe = new_cdclk_state->pipe;
++ struct intel_cdclk_config cdclk_config;
++ enum pipe pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+@@ -2462,12 +2463,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
+ if (IS_DG2(i915))
+ intel_cdclk_pcode_pre_notify(state);
+
+- if (new_cdclk_state->disable_pipes ||
+- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++ if (new_cdclk_state->disable_pipes) {
++ cdclk_config = new_cdclk_state->actual;
++ pipe = INVALID_PIPE;
++ } else {
++ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
++ cdclk_config = new_cdclk_state->actual;
++ pipe = new_cdclk_state->pipe;
++ } else {
++ cdclk_config = old_cdclk_state->actual;
++ pipe = INVALID_PIPE;
++ }
+
+- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
++ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
++ old_cdclk_state->actual.voltage_level);
+ }
++
++ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++
++ intel_set_cdclk(i915, &cdclk_config, pipe);
+ }
+
+ /**
+@@ -2485,7 +2499,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+- enum pipe pipe = new_cdclk_state->pipe;
++ enum pipe pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+@@ -2495,11 +2509,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
+ intel_cdclk_pcode_post_notify(state);
+
+ if (!new_cdclk_state->disable_pipes &&
+- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
++ pipe = new_cdclk_state->pipe;
++ else
++ pipe = INVALID_PIPE;
+
+- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+- }
++ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
++
++ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ }
+
+ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+--
+2.43.0
+
--- /dev/null
+From 5bf00989c7fa6bb80607a7e6f6ba6c72f3be29e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Sep 2023 16:04:30 +0300
+Subject: drm/i915: Change intel_pipe_update_{start,end}() calling convention
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 09f390d4e2f38f8433431f4da31ca0a17a5c7853 ]
+
+We'll need to also look at the old crtc state in
+intel_pipe_update_start() so change the calling convention to
+just plumb in the full atomic state instead.
+
+Cc: Manasi Navare <navaremanasi@chromium.org>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230901130440.2085-3-ville.syrjala@linux.intel.com
+Reviewed-by: Manasi Navare <navaremanasi@chromium.org>
+Reviewed-by: Mitul Golani <mitulkumar.ajitkumar.golani@intel.com>
+Stable-dep-of: 4a36e46df7aa ("drm/i915: Disable live M/N updates when using bigjoiner")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_crtc.c | 18 ++++++++++++------
+ drivers/gpu/drm/i915/display/intel_crtc.h | 6 ++++--
+ drivers/gpu/drm/i915/display/intel_display.c | 4 ++--
+ 3 files changed, 18 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
+index 182c6dd64f47c..65d91c7ad22ff 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc.c
+@@ -470,7 +470,8 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+
+ /**
+ * intel_pipe_update_start() - start update of a set of display registers
+- * @new_crtc_state: the new crtc state
++ * @state: the atomic state
++ * @crtc: the crtc
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically regarding vblank. If the next vblank will happens within
+@@ -480,10 +481,12 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays.
+ */
+-void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
++void intel_pipe_update_start(struct intel_atomic_state *state,
++ struct intel_crtc *crtc)
+ {
+- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
++ struct intel_crtc_state *new_crtc_state =
++ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+@@ -631,15 +634,18 @@ static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+
+ /**
+ * intel_pipe_update_end() - end update of a set of display registers
+- * @new_crtc_state: the new crtc state
++ * @state: the atomic state
++ * @crtc: the crtc
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank.
+ */
+-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
++void intel_pipe_update_end(struct intel_atomic_state *state,
++ struct intel_crtc *crtc)
+ {
+- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
++ struct intel_crtc_state *new_crtc_state =
++ intel_atomic_get_new_crtc_state(state, crtc);
+ enum pipe pipe = crtc->pipe;
+ int scanline_end = intel_get_crtc_scanline(crtc);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
+index 51a4c8df9e657..22d7993d1f0ba 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.h
++++ b/drivers/gpu/drm/i915/display/intel_crtc.h
+@@ -36,8 +36,10 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
+ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
+-void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
+-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
++void intel_pipe_update_start(struct intel_atomic_state *state,
++ struct intel_crtc *crtc);
++void intel_pipe_update_end(struct intel_atomic_state *state,
++ struct intel_crtc *crtc);
+ void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
+ struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
+ struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index a072fbb9872aa..af93761e82cac 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -6616,7 +6616,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ intel_crtc_planes_update_noarm(state, crtc);
+
+ /* Perform vblank evasion around commit operation */
+- intel_pipe_update_start(new_crtc_state);
++ intel_pipe_update_start(state, crtc);
+
+ commit_pipe_pre_planes(state, crtc);
+
+@@ -6624,7 +6624,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+
+ commit_pipe_post_planes(state, crtc);
+
+- intel_pipe_update_end(new_crtc_state);
++ intel_pipe_update_end(state, crtc);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+--
+2.43.0
+
--- /dev/null
+From 73cfc32980dd26340ee434c13b39b7fe574d6435 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Apr 2024 00:34:28 +0300
+Subject: drm/i915: Disable live M/N updates when using bigjoiner
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 4a36e46df7aa781c756f09727d37dc2783f1ee75 ]
+
+All joined pipes share the same transcoder/timing generator.
+Currently we just do the commits per-pipe, which doesn't really
+work if we need to change the timings at the same time. For
+now just disable live M/N updates when bigjoiner is needed.
+
+Cc: stable@vger.kernel.org
+Tested-by: Vidya Srinivas <vidya.srinivas@intel.com>
+Reviewed-by: Arun R Murthy <arun.r.murthy@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240404213441.17637-5-ville.syrjala@linux.intel.com
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+(cherry picked from commit ef79820db723a2a7c229a7251c12859e7e25a247)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 7e135ed8e1d75..ccc47cf4d15d8 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2148,7 +2148,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
+ intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
+ int pixel_clock;
+
+- if (has_seamless_m_n(connector))
++ /*
++ * FIXME all joined pipes share the same transcoder.
++ * Need to account for that when updating M/N live.
++ */
++ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
+ pipe_config->update_m_n = true;
+
+ if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
+--
+2.43.0
+
--- /dev/null
+From fd81ce8a820a0f671f1e890bfbe5c30254b85907 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Sep 2023 16:04:32 +0300
+Subject: drm/i915: Enable VRR later during fastsets
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 691dec86acc3afb469f09e9a4a00508b458bdb0c ]
+
+In order to reconcile seamless M/N updates with VRR we'll
+need to defer the fastset VRR enable to happen after the
+seamless M/N update (which happens during the vblank evade
+critical section). So just push the VRR enable to be the last
+thing during the update.
+
+This will also affect the vblank evasion as the transcoder
+will now still be running with the old VRR state during
+the vblank evasion. So just grab the timings always from the
+old crtc state during any non-modeset commit, and also grab
+the current state of VRR from the active timings (as we disable
+VRR before vblank evasion during fastsets).
+
+This also fixes vblank evasion for seamless M/N updates as
+we now properly account for the fact that the M/N update
+happens after vblank evasion.
+
+Cc: Manasi Navare <navaremanasi@chromium.org>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230901130440.2085-5-ville.syrjala@linux.intel.com
+Reviewed-by: Manasi Navare <navaremanasi@chromium.org>
+Reviewed-by: Mitul Golani <mitulkumar.ajitkumar.golani@intel.com>
+Stable-dep-of: 4a36e46df7aa ("drm/i915: Disable live M/N updates when using bigjoiner")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_crtc.c | 35 ++++++++++++--------
+ drivers/gpu/drm/i915/display/intel_display.c | 21 ++++++++----
+ 2 files changed, 36 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
+index 9693747a18c66..5c89eba8148c0 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc.c
+@@ -472,15 +472,31 @@ static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ int *min, int *max, int *vblank_start)
+ {
++ const struct intel_crtc_state *old_crtc_state =
++ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+- const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
++ const struct intel_crtc_state *crtc_state;
++ const struct drm_display_mode *adjusted_mode;
+
+- if (new_crtc_state->vrr.enable) {
+- if (intel_vrr_is_push_sent(new_crtc_state))
+- *vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
++ /*
++ * During fastsets/etc. the transcoder is still
++ * running with the old timings at this point.
++ *
++ * TODO: maybe just use the active timings here?
++ */
++ if (intel_crtc_needs_modeset(new_crtc_state))
++ crtc_state = new_crtc_state;
++ else
++ crtc_state = old_crtc_state;
++
++ adjusted_mode = &crtc_state->hw.adjusted_mode;
++
++ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
++ if (intel_vrr_is_push_sent(crtc_state))
++ *vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ else
+- *vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
++ *vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ } else {
+ *vblank_start = intel_mode_vblank_start(adjusted_mode);
+ }
+@@ -712,15 +728,6 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
+ */
+ intel_vrr_send_push(new_crtc_state);
+
+- /*
+- * Seamless M/N update may need to update frame timings.
+- *
+- * FIXME Should be synchronized with the start of vblank somehow...
+- */
+- if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+- intel_crtc_update_active_timings(new_crtc_state,
+- new_crtc_state->vrr.enable);
+-
+ local_irq_enable();
+
+ if (intel_vgpu_active(dev_priv))
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index af93761e82cac..39efd67cc3232 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -6533,6 +6533,8 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+ {
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
++ const struct intel_crtc_state *old_crtc_state =
++ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+@@ -6544,6 +6546,9 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ !intel_crtc_needs_modeset(new_crtc_state))
+ skl_detach_scalers(new_crtc_state);
++
++ if (vrr_enabling(old_crtc_state, new_crtc_state))
++ intel_vrr_enable(new_crtc_state);
+ }
+
+ static void intel_enable_crtc(struct intel_atomic_state *state,
+@@ -6584,12 +6589,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ intel_dpt_configure(crtc);
+ }
+
+- if (vrr_enabling(old_crtc_state, new_crtc_state)) {
+- intel_vrr_enable(new_crtc_state);
+- intel_crtc_update_active_timings(new_crtc_state,
+- new_crtc_state->vrr.enable);
+- }
+-
+ if (!modeset) {
+ if (new_crtc_state->preload_luts &&
+ intel_crtc_needs_color_update(new_crtc_state))
+@@ -6626,6 +6625,16 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+
+ intel_pipe_update_end(state, crtc);
+
++ /*
++ * VRR/Seamless M/N update may need to update frame timings.
++ *
++ * FIXME Should be synchronized with the start of vblank somehow...
++ */
++ if (vrr_enabling(old_crtc_state, new_crtc_state) ||
++ (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)))
++ intel_crtc_update_active_timings(new_crtc_state,
++ new_crtc_state->vrr.enable);
++
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+--
+2.43.0
+
--- /dev/null
+From d2798590bca44fc9cac3ab53c74d169f279e9fd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Sep 2023 16:04:31 +0300
+Subject: drm/i915: Extract intel_crtc_vblank_evade_scanlines()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit f4b0cece716c95e16d973a774d5a5c5cc8cb335d ]
+
+Pull the vblank evasion scanline calculations into their own helper
+to declutter intel_pipe_update_start() a bit.
+
+Reviewed-by: Manasi Navare <navaremanasi@chromium.org>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230901130440.2085-4-ville.syrjala@linux.intel.com
+Reviewed-by: Mitul Golani <mitulkumar.ajitkumar.golani@intel.com>
+Stable-dep-of: 4a36e46df7aa ("drm/i915: Disable live M/N updates when using bigjoiner")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_crtc.c | 53 +++++++++++++----------
+ 1 file changed, 31 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
+index 65d91c7ad22ff..9693747a18c66 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc.c
+@@ -468,6 +468,36 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+ return vblank_start;
+ }
+
++static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
++ struct intel_crtc *crtc,
++ int *min, int *max, int *vblank_start)
++{
++ const struct intel_crtc_state *new_crtc_state =
++ intel_atomic_get_new_crtc_state(state, crtc);
++ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
++
++ if (new_crtc_state->vrr.enable) {
++ if (intel_vrr_is_push_sent(new_crtc_state))
++ *vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
++ else
++ *vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
++ } else {
++ *vblank_start = intel_mode_vblank_start(adjusted_mode);
++ }
++
++ /* FIXME needs to be calibrated sensibly */
++ *min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
++ VBLANK_EVASION_TIME_US);
++ *max = *vblank_start - 1;
++
++ /*
++ * M/N is double buffered on the transcoder's undelayed vblank,
++ * so with seamless M/N we must evade both vblanks.
++ */
++ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
++ *min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
++}
++
+ /**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @state: the atomic state
+@@ -487,7 +517,6 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+- const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+@@ -503,27 +532,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
+ if (intel_crtc_needs_vblank_work(new_crtc_state))
+ intel_crtc_vblank_work_init(new_crtc_state);
+
+- if (new_crtc_state->vrr.enable) {
+- if (intel_vrr_is_push_sent(new_crtc_state))
+- vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
+- else
+- vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+- } else {
+- vblank_start = intel_mode_vblank_start(adjusted_mode);
+- }
+-
+- /* FIXME needs to be calibrated sensibly */
+- min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+- VBLANK_EVASION_TIME_US);
+- max = vblank_start - 1;
+-
+- /*
+- * M/N is double buffered on the transcoder's undelayed vblank,
+- * so with seamless M/N we must evade both vblanks.
+- */
+- if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+- min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+-
++ intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
+ if (min <= 0 || max <= 0)
+ goto irq_disable;
+
+--
+2.43.0
+
--- /dev/null
+From acd71c793e6c942741b83fee2df4cb923fa943bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 May 2023 17:38:59 +0300
+Subject: drm/i915: Fix FEC pipe A vs. DDI A mixup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 126f94e87e7960ef7ae58180e39c19cc9dcbbf7f ]
+
+On pre-TGL FEC is a port level feature, not a transcoder
+level feature, and it's DDI A which doesn't have it, not
+trancoder A. Check for the correct thing when determining
+whether FEC is supported or not.
+
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230502143906.2401-5-ville.syrjala@linux.intel.com
+Reviewed-by: Luca Coelho <luciano.coelho@intel.com>
+Stable-dep-of: 99f855082f22 ("drm/i915/mst: Reject FEC+MST on ICL")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 18ee4f2a87f9e..fff008955cb2c 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1310,13 +1310,13 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
+ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+ {
++ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+- /* On TGL, FEC is supported on all Pipes */
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return true;
+
+- if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
++ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+ return true;
+
+ return false;
+--
+2.43.0
+
--- /dev/null
+From 11e94baa526bc5d8fdf63f3292260c1e9830385a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Apr 2024 16:51:47 +0300
+Subject: drm/i915/mst: Reject FEC+MST on ICL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+[ Upstream commit 99f855082f228cdcecd6ab768d3b8b505e0eb028 ]
+
+ICL supposedly doesn't support FEC on MST. Reject it.
+
+Cc: stable@vger.kernel.org
+Fixes: d51f25eb479a ("drm/i915: Add DSC support to MST path")
+Reviewed-by: Uma Shankar <uma.shankar@intel.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240402135148.23011-7-ville.syrjala@linux.intel.com
+(cherry picked from commit b648ce2a28ba83c4fa67c61fcc5983e15e9d4afb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index fff008955cb2c..d712cb9b81e1e 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1316,7 +1316,8 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return true;
+
+- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
++ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
++ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
+ return true;
+
+ return false;
+--
+2.43.0
+
--- /dev/null
+From e3a424cd221ae02762cee805a9714721c5f7d621 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 2 Dec 2023 01:40:25 +0200
+Subject: drm/msm/dpu: populate SSPP scaler block version
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit 46b1f1b839cad600de3ad7ed999bd0155c528746 ]
+
+The function _dpu_hw_sspp_setup_scaler3() passes scaler_blk.version to
+dpu_hw_setup_scaler3(), which uses it to determine how the scaler
+(QSEED3) block should be programmed. However, up to now we were not
+setting this field. Set it now, splitting the vig_sblk data where the
+version fields differ.
+
+Reported-by: Marijn Suijten <marijn.suijten@somainline.org>
+Fixes: 9b6f4fedaac2 ("drm/msm/dpu: Add SM6125 support")
+Fixes: 27f0df03f3ff ("drm/msm/dpu: Add SM6375 support")
+Fixes: 3186acba5cdc ("drm/msm/dpu: Add SM6350 support")
+Fixes: efcd0107727c ("drm/msm/dpu: add support for SM8550")
+Fixes: 4a352c2fc15a ("drm/msm/dpu: Introduce SC8280XP")
+Fixes: 0e91bcbb0016 ("drm/msm/dpu: Add SM8350 to hw catalog")
+Fixes: 100d7ef6995d ("drm/msm/dpu: add support for SM8450")
+Fixes: 3581b7062cec ("drm/msm/disp/dpu1: add support for display on SM6115")
+Fixes: dabfdd89eaa9 ("drm/msm/disp/dpu1: add inline rotation support for sc7280")
+Fixes: f3af2d6ee9ab ("drm/msm/dpu: Add SC8180x to hw catalog")
+Fixes: 94391a14fc27 ("drm/msm/dpu1: Add MSM8998 to hw catalog")
+Fixes: af776a3e1c30 ("drm/msm/dpu: add SM8250 to hw catalog")
+Fixes: 386fced3f76f ("drm/msm/dpu: add SM8150 to hw catalog")
+Fixes: b75ab05a3479 ("msm:disp:dpu1: add scaler support on SC7180 display")
+Fixes: 25fdd5933e4c ("drm/msm: Add SDM845 DPU support")
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Patchwork: https://patchwork.freedesktop.org/patch/570098/
+Link: https://lore.kernel.org/r/20231201234234.2065610-2-dmitry.baryshkov@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
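+
+For reference, the SSPP_SCALER_VER() value introduced below is simply a
+major/minor pair packed into one integer. A minimal standalone C sketch
+of the packing (the version check at the end is illustrative only; the
+exact thresholds used by dpu_hw_setup_scaler3() are not shown here):
+
+  #include <stdio.h>
+
+  /* same packing as the catalog macro: major in the upper 16 bits */
+  #define SSPP_SCALER_VER(maj, min)       (((maj) << 16) | (min))
+
+  int main(void)
+  {
+          unsigned int v = SSPP_SCALER_VER(3, 0);   /* e.g. a QSEED4 block */
+
+          printf("packed version: 0x%08x (major %u, minor %u)\n",
+                 v, v >> 16, v & 0xffff);
+
+          /* scaler programming can then branch on the block revision */
+          if (v >= SSPP_SCALER_VER(3, 0))
+                  printf("use the newer register layout\n");
+          else
+                  printf("use the older QSEED3 register layout\n");
+          return 0;
+  }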
+---
+ .../msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 8 +-
+ .../msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 8 +-
+ .../msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 8 +-
+ .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 95 ++++++++++++++-----
+ .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 3 +-
+ 5 files changed, 87 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 99acaf917e430..f0c3804f42587 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_0,
++ .sblk = &sm8150_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_1,
++ .sblk = &sm8150_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+@@ -93,7 +93,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_2,
++ .sblk = &sm8150_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+@@ -101,7 +101,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_3,
++ .sblk = &sm8150_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index c92fbf24fbac1..47de71e71e310 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -76,7 +76,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_0,
++ .sblk = &sm8150_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+@@ -84,7 +84,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_1,
++ .sblk = &sm8150_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+@@ -92,7 +92,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_2,
++ .sblk = &sm8150_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+@@ -100,7 +100,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK,
+- .sblk = &sdm845_vig_sblk_3,
++ .sblk = &sm8150_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+index 8a19cfa274dea..72a1726371cae 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_0,
++ .sblk = &sm8450_vig_sblk_0,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_1,
++ .sblk = &sm8450_vig_sblk_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+@@ -93,7 +93,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_2,
++ .sblk = &sm8450_vig_sblk_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+@@ -101,7 +101,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x32c,
+ .features = VIG_SC7180_MASK,
+- .sblk = &sm8250_vig_sblk_3,
++ .sblk = &sm8450_vig_sblk_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 713dfc0797181..77d09f961d866 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -250,14 +250,17 @@ static const uint32_t wb2_formats[] = {
+ * SSPP sub blocks config
+ *************************************************************/
+
++#define SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min))
++
+ /* SSPP common configuration */
+-#define _VIG_SBLK(sdma_pri, qseed_ver) \
++#define _VIG_SBLK(sdma_pri, qseed_ver, scaler_ver) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .scaler_blk = {.name = "scaler", \
+ .id = qseed_ver, \
++ .version = scaler_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .id = DPU_SSPP_CSC_10BIT, \
+@@ -269,13 +272,14 @@ static const uint32_t wb2_formats[] = {
+ .rotation_cfg = NULL, \
+ }
+
+-#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
++#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, scaler_ver, rot_cfg) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .scaler_blk = {.name = "scaler", \
+ .id = qseed_ver, \
++ .version = scaler_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .id = DPU_SSPP_CSC_10BIT, \
+@@ -299,13 +303,17 @@ static const uint32_t wb2_formats[] = {
+ }
+
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
+- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 2));
+
+ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ .rot_maxheight = 1088,
+@@ -314,13 +322,30 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ };
+
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
+ static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 3));
++
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_0 =
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_1 =
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_2 =
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
++static const struct dpu_sspp_sub_blks sm8150_vig_sblk_3 =
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
++ SSPP_SCALER_VER(1, 4));
+
+ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
+ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
+@@ -328,34 +353,60 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
+ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
+
+ static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+- _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+
+ static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
+- _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
++ _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0),
++ &dpu_rot_sc7280_cfg_v2);
+
+ static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
+- _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+
+ static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
+- _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
++ _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE,
++ SSPP_SCALER_VER(2, 4));
+
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 0));
++
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 =
++ _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 =
++ _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 =
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
++static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 =
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 1));
+
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
+- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
+- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
+- _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
+- _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
++ _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4,
++ SSPP_SCALER_VER(3, 2));
+ static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
+ static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+index 6c9634209e9fc..3f82d84bd1c90 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+@@ -269,7 +269,8 @@ enum {
+ /**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+- * @version: qseed block revision
++ * @version: qseed block revision, on QSEED3+ platforms this is the value of
++ * scaler_blk.base + QSEED3_HW_VERSION registers.
+ */
+ struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+--
+2.43.0
+
--- /dev/null
+From 5f6d30b6e449efd342e21124268b5e77e227a521 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Nov 2023 13:58:12 +0100
+Subject: media: videobuf2: request more buffers for vb2_read
+
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+
+[ Upstream commit 350ab13e1382f2afcc2285041a1e75b80d771c2c ]
+
+The vb2 read support requests 1 buffer, leaving it to the driver
+to increase this number to something that works.
+
+Unfortunately, drivers do not deal with this reliably, and in fact
+this caused problems for the bttv driver and reading from /dev/vbiX,
+causing every other VBI frame to be all 0.
+
+Instead, request the maximum of 2 and q->min_buffers_needed + 1 as the
+number of buffers.
+
+In order to start streaming you need at least q->min_buffers_needed
+queued buffers, so add 1 buffer for processing. And if that field
+is 0, then choose 2 (again, one buffer is being filled while the
+other one is being processed).
+
+This certainly makes more sense than requesting just 1 buffer, and
+the VBI bttv support is now working again.
+
+It turns out that the old videobuf1 behavior of bttv was to allocate
+8 (video) and 4 (vbi) buffers when used with read(). After the vb2
+conversion that changed to 2 for both. With this patch it is 3, which
+is really all you need.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Fixes: b7ec3212a73a ("media: bttv: convert to vb2")
+Tested-by: Dr. David Alan Gilbert <dave@treblig.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
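+
+A minimal standalone C sketch of the buffer-count choice described
+above (assuming only that min_buffers_needed is a small non-negative
+number, as in struct vb2_queue):
+
+  #include <stdio.h>
+
+  /*
+   * Number of buffers to request for read()/write() file I/O:
+   * min_buffers_needed queued buffers to be able to start streaming,
+   * plus one being processed, and never fewer than two overall.
+   */
+  static unsigned int fileio_buffer_count(unsigned int min_buffers_needed)
+  {
+          unsigned int count = min_buffers_needed + 1;
+
+          return count < 2 ? 2 : count;
+  }
+
+  int main(void)
+  {
+          unsigned int min;
+
+          for (min = 0; min <= 3; min++)
+                  printf("min_buffers_needed=%u -> request %u buffers\n",
+                         min, fileio_buffer_count(min));
+          return 0;
+  }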
+---
+ drivers/media/common/videobuf2/videobuf2-core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
+index cf6727d9c81f3..468191438849e 100644
+--- a/drivers/media/common/videobuf2/videobuf2-core.c
++++ b/drivers/media/common/videobuf2/videobuf2-core.c
+@@ -2648,9 +2648,14 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
+ return -EBUSY;
+
+ /*
+- * Start with count 1, driver can increase it in queue_setup()
++ * Start with q->min_buffers_needed + 1, driver can increase it in
++ * queue_setup()
++ *
++ * 'min_buffers_needed' buffers need to be queued up before you
++ * can start streaming, plus 1 for userspace (or in this case,
++ * kernelspace) processing.
+ */
+- count = 1;
++ count = max(2, q->min_buffers_needed + 1);
+
+ dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
+ (read) ? "read" : "write", count, q->fileio_read_once,
+--
+2.43.0
+
--- /dev/null
+From bf9046cbd2a805ce948bca3ee412e4bd213d4972 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Sep 2023 12:18:58 +0200
+Subject: selftests: timers: Convert posix_timers test to generate KTAP output
+
+From: Mark Brown <broonie@kernel.org>
+
+[ Upstream commit 071af0c9e582bc47e379e39490a2bc1adfe4ec68 ]
+
+Currently the posix_timers test does not produce KTAP output but rather a
+custom format. This means that we only get a pass/fail for the suite, not
+for each individual test that the suite does. Convert to using the standard
+kselftest output functions which result in KTAP output being generated.
+
+As part of this, fix the printing of diagnostics in the unlikely event
+that the pthread APIs fail; these were using perror(), but the API
+functions directly return an error code instead of setting errno.
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 6d029c25b71f ("selftests/timers/posix_timers: Reimplement check_timer_distribution()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
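+
+The perror() point above is worth spelling out: pthread_create() and
+pthread_join() return the error number directly and do not set errno.
+A small standalone sketch of that reporting style (plain printf() here
+rather than the ksft_*() helpers used in the test, and reporting the
+returned code as the message describes):
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <string.h>
+
+  static void *worker(void *arg)
+  {
+          (void)arg;
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t thread;
+          int err;
+
+          err = pthread_create(&thread, NULL, worker, NULL);
+          if (err) {
+                  /* report the returned code, not errno */
+                  printf("Can't create thread: %s (%d)\n", strerror(err), err);
+                  return 1;
+          }
+
+          err = pthread_join(thread, NULL);
+          if (err) {
+                  printf("Can't join thread: %s (%d)\n", strerror(err), err);
+                  return 1;
+          }
+
+          printf("thread created and joined\n");
+          return 0;
+  }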
+---
+ tools/testing/selftests/timers/posix_timers.c | 81 ++++++++++---------
+ 1 file changed, 41 insertions(+), 40 deletions(-)
+
+diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
+index 9a42403eaff70..2669c45316b3d 100644
+--- a/tools/testing/selftests/timers/posix_timers.c
++++ b/tools/testing/selftests/timers/posix_timers.c
+@@ -76,22 +76,21 @@ static int check_diff(struct timeval start, struct timeval end)
+
+ static int check_itimer(int which)
+ {
++ const char *name;
+ int err;
+ struct timeval start, end;
+ struct itimerval val = {
+ .it_value.tv_sec = DELAY,
+ };
+
+- printf("Check itimer ");
+-
+ if (which == ITIMER_VIRTUAL)
+- printf("virtual... ");
++ name = "ITIMER_VIRTUAL";
+ else if (which == ITIMER_PROF)
+- printf("prof... ");
++ name = "ITIMER_PROF";
+ else if (which == ITIMER_REAL)
+- printf("real... ");
+-
+- fflush(stdout);
++ name = "ITIMER_REAL";
++ else
++ return -1;
+
+ done = 0;
+
+@@ -104,13 +103,13 @@ static int check_itimer(int which)
+
+ err = gettimeofday(&start, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+ err = setitimer(which, &val, NULL);
+ if (err < 0) {
+- perror("Can't set timer\n");
++ ksft_perror("Can't set timer");
+ return -1;
+ }
+
+@@ -123,20 +122,18 @@ static int check_itimer(int which)
+
+ err = gettimeofday(&end, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+- if (!check_diff(start, end))
+- printf("[OK]\n");
+- else
+- printf("[FAIL]\n");
++ ksft_test_result(check_diff(start, end) == 0, "%s\n", name);
+
+ return 0;
+ }
+
+ static int check_timer_create(int which)
+ {
++ const char *type;
+ int err;
+ timer_t id;
+ struct timeval start, end;
+@@ -144,31 +141,32 @@ static int check_timer_create(int which)
+ .it_value.tv_sec = DELAY,
+ };
+
+- printf("Check timer_create() ");
+ if (which == CLOCK_THREAD_CPUTIME_ID) {
+- printf("per thread... ");
++ type = "thread";
+ } else if (which == CLOCK_PROCESS_CPUTIME_ID) {
+- printf("per process... ");
++ type = "process";
++ } else {
++ ksft_print_msg("Unknown timer_create() type %d\n", which);
++ return -1;
+ }
+- fflush(stdout);
+
+ done = 0;
+ err = timer_create(which, NULL, &id);
+ if (err < 0) {
+- perror("Can't create timer\n");
++ ksft_perror("Can't create timer");
+ return -1;
+ }
+ signal(SIGALRM, sig_handler);
+
+ err = gettimeofday(&start, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+ err = timer_settime(id, 0, &val, NULL);
+ if (err < 0) {
+- perror("Can't set timer\n");
++ ksft_perror("Can't set timer");
+ return -1;
+ }
+
+@@ -176,14 +174,12 @@ static int check_timer_create(int which)
+
+ err = gettimeofday(&end, NULL);
+ if (err < 0) {
+- perror("Can't call gettimeofday()\n");
++ ksft_perror("Can't call gettimeofday()");
+ return -1;
+ }
+
+- if (!check_diff(start, end))
+- printf("[OK]\n");
+- else
+- printf("[FAIL]\n");
++ ksft_test_result(check_diff(start, end) == 0,
++ "timer_create() per %s\n", type);
+
+ return 0;
+ }
+@@ -220,25 +216,25 @@ static int check_timer_distribution(void)
+ .it_interval.tv_nsec = 1000 * 1000,
+ };
+
+- printf("Check timer_create() per process signal distribution... ");
+- fflush(stdout);
+-
+ remain = nthreads + 1; /* worker threads + this thread */
+ signal(SIGALRM, distribution_handler);
+ err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
+ if (err < 0) {
+- perror("Can't create timer\n");
++ ksft_perror("Can't create timer");
+ return -1;
+ }
+ err = timer_settime(id, 0, &val, NULL);
+ if (err < 0) {
+- perror("Can't set timer\n");
++ ksft_perror("Can't set timer");
+ return -1;
+ }
+
+ for (i = 0; i < nthreads; i++) {
+- if (pthread_create(&threads[i], NULL, distribution_thread, NULL)) {
+- perror("Can't create thread\n");
++ err = pthread_create(&threads[i], NULL, distribution_thread,
++ NULL);
++ if (err) {
++ ksft_print_msg("Can't create thread: %s (%d)\n",
++ strerror(errno), errno);
+ return -1;
+ }
+ }
+@@ -247,25 +243,30 @@ static int check_timer_distribution(void)
+ while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
+
+ for (i = 0; i < nthreads; i++) {
+- if (pthread_join(threads[i], NULL)) {
+- perror("Can't join thread\n");
++ err = pthread_join(threads[i], NULL);
++ if (err) {
++ ksft_print_msg("Can't join thread: %s (%d)\n",
++ strerror(errno), errno);
+ return -1;
+ }
+ }
+
+ if (timer_delete(id)) {
+- perror("Can't delete timer\n");
++ ksft_perror("Can't delete timer");
+ return -1;
+ }
+
+- printf("[OK]\n");
++ ksft_test_result_pass("check_timer_distribution\n");
+ return 0;
+ }
+
+ int main(int argc, char **argv)
+ {
+- printf("Testing posix timers. False negative may happen on CPU execution \n");
+- printf("based timers if other threads run on the CPU...\n");
++ ksft_print_header();
++ ksft_set_plan(6);
++
++ ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n");
++ ksft_print_msg("based timers if other threads run on the CPU...\n");
+
+ if (check_itimer(ITIMER_VIRTUAL) < 0)
+ return ksft_exit_fail();
+@@ -294,5 +295,5 @@ int main(int argc, char **argv)
+ if (check_timer_distribution() < 0)
+ return ksft_exit_fail();
+
+- return ksft_exit_pass();
++ ksft_finished();
+ }
+--
+2.43.0
+
--- /dev/null
+From 6e7660b2b4f23620ad6d74cdd8b7d6bae733eb87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Apr 2024 16:26:28 -0700
+Subject: selftests: timers: Fix posix_timers ksft_print_msg() warning
+
+From: John Stultz <jstultz@google.com>
+
+[ Upstream commit e4a6bceac98eba3c00e874892736b34ea5fdaca3 ]
+
+After commit 6d029c25b71f ("selftests/timers/posix_timers: Reimplement
+check_timer_distribution()") the following warning occurs when building
+with an older gcc:
+
+posix_timers.c:250:2: warning: format not a string literal and no format arguments [-Wformat-security]
+ 250 | ksft_print_msg(errmsg);
+ | ^~~~~~~~~~~~~~
+
+Fix this up by changing it to ksft_print_msg("%s", errmsg)
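+
+As a minimal illustration of this warning class (not taken from the patch),
+gcc -Wformat -Wformat-security flags any printf-style call whose format
+string is not a literal and which has no further arguments, because a
+stray '%' in the variable would be parsed as a conversion specifier:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const char *errmsg = "Can't create thread: 100% failure\n";
+
+          /* printf(errmsg);       would trigger -Wformat-security and
+           *                       misparse the '%' at run time          */
+          printf("%s", errmsg);    /* safe: errmsg is printed verbatim */
+          return 0;
+  }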
+
+Fixes: 6d029c25b71f ("selftests/timers/posix_timers: Reimplement check_timer_distribution()")
+Signed-off-by: John Stultz <jstultz@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Justin Stitt <justinstitt@google.com>
+Acked-by: Shuah Khan <skhan@linuxfoundation.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240410232637.4135564-1-jstultz@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/timers/posix_timers.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
+index 14355d8472110..c001dd79179d5 100644
+--- a/tools/testing/selftests/timers/posix_timers.c
++++ b/tools/testing/selftests/timers/posix_timers.c
+@@ -247,7 +247,7 @@ static int check_timer_distribution(void)
+ ksft_test_result_skip("check signal distribution (old kernel)\n");
+ return 0;
+ err:
+- ksft_print_msg(errmsg);
++ ksft_print_msg("%s", errmsg);
+ return -1;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From cde43342038b1f15cbfb497fbd1d84074c3e49f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Apr 2024 15:38:03 +0200
+Subject: selftests/timers/posix_timers: Reimplement check_timer_distribution()
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+[ Upstream commit 6d029c25b71f2de2838a6f093ce0fa0e69336154 ]
+
+check_timer_distribution() runs ten threads in a busy loop and tries to
+test that the kernel distributes a process posix CPU timer signal to every
+thread over time.
+
+There is no guarantee that this is true even after commit bcb7ee79029d
+("posix-timers: Prefer delivery of signals to the current thread") because
+that commit only avoids waking up the sleeping process leader thread, but
+that has nothing to do with the actual signal delivery.
+
+As the signal is process-wide, the first thread which observes sigpending
+and wins the race to lock sighand will deliver the signal. Testing shows
+that this hangs on a regular basis because some threads never win the race.
+
+The comment "This primarily tests that the kernel does not favour any one."
+is wrong. The kernel does favour a thread which hits the timer interrupt
+when CLOCK_PROCESS_CPUTIME_ID expires.
+
+Rewrite the test so it only checks that the group leader sleeping in join()
+never receives SIGALRM and the thread which burns CPU cycles receives all
+signals.
+
+In older kernels which do not have commit bcb7ee79029d ("posix-timers:
+Prefer delivery of signals to the current thread") the test case fails
+immediately: the very first tick wakes the leader up. Otherwise it quickly
+succeeds after 100 ticks.
+
+CI testing wants to use newer selftest versions on stable kernels. In this
+case the test is guaranteed to fail.
+
+So check in the failure case whether the kernel version is less than v6.3
+and skip the test result in that case.
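+
+A simplified standalone sketch of that version gate (the in-tree helper
+lives in kselftest.h; the names below are illustrative only):
+
+  #include <stdio.h>
+  #include <sys/utsname.h>
+
+  static int kernel_at_least(unsigned int want_major, unsigned int want_minor)
+  {
+          unsigned int major, minor;
+          struct utsname info;
+
+          if (uname(&info) || sscanf(info.release, "%u.%u.", &major, &minor) != 2)
+                  return -1;
+          return major > want_major ||
+                 (major == want_major && minor >= want_minor);
+  }
+
+  int main(void)
+  {
+          /* Only treat a failure as a real failure on v6.3+ kernels */
+          printf("running on >= 6.3: %d\n", kernel_at_least(6, 3));
+          return 0;
+  }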
+
+[ tglx: Massaged change log, renamed the version check helper ]
+
+Fixes: e797203fb3ba ("selftests/timers/posix_timers: Test delivery of signals across threads")
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240409133802.GD29396@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kselftest.h | 13 +++
+ tools/testing/selftests/timers/posix_timers.c | 103 ++++++++----------
+ 2 files changed, 60 insertions(+), 56 deletions(-)
+
+diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
+index 529d29a359002..68d5a93dff8dc 100644
+--- a/tools/testing/selftests/kselftest.h
++++ b/tools/testing/selftests/kselftest.h
+@@ -49,6 +49,7 @@
+ #include <unistd.h>
+ #include <stdarg.h>
+ #include <stdio.h>
++#include <sys/utsname.h>
+ #endif
+
+ #ifndef ARRAY_SIZE
+@@ -327,4 +328,16 @@ static inline int ksft_exit_skip(const char *msg, ...)
+ exit(KSFT_SKIP);
+ }
+
++static inline int ksft_min_kernel_version(unsigned int min_major,
++ unsigned int min_minor)
++{
++ unsigned int major, minor;
++ struct utsname info;
++
++ if (uname(&info) || sscanf(info.release, "%u.%u.", &major, &minor) != 2)
++ ksft_exit_fail_msg("Can't parse kernel version\n");
++
++ return major > min_major || (major == min_major && minor >= min_minor);
++}
++
+ #endif /* __KSELFTEST_H */
+diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
+index 2669c45316b3d..14355d8472110 100644
+--- a/tools/testing/selftests/timers/posix_timers.c
++++ b/tools/testing/selftests/timers/posix_timers.c
+@@ -184,80 +184,71 @@ static int check_timer_create(int which)
+ return 0;
+ }
+
+-int remain;
+-__thread int got_signal;
++static pthread_t ctd_thread;
++static volatile int ctd_count, ctd_failed;
+
+-static void *distribution_thread(void *arg)
++static void ctd_sighandler(int sig)
+ {
+- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
+- return NULL;
++ if (pthread_self() != ctd_thread)
++ ctd_failed = 1;
++ ctd_count--;
+ }
+
+-static void distribution_handler(int nr)
++static void *ctd_thread_func(void *arg)
+ {
+- if (!__atomic_exchange_n(&got_signal, 1, __ATOMIC_RELAXED))
+- __atomic_fetch_sub(&remain, 1, __ATOMIC_RELAXED);
+-}
+-
+-/*
+- * Test that all running threads _eventually_ receive CLOCK_PROCESS_CPUTIME_ID
+- * timer signals. This primarily tests that the kernel does not favour any one.
+- */
+-static int check_timer_distribution(void)
+-{
+- int err, i;
+- timer_t id;
+- const int nthreads = 10;
+- pthread_t threads[nthreads];
+ struct itimerspec val = {
+ .it_value.tv_sec = 0,
+ .it_value.tv_nsec = 1000 * 1000,
+ .it_interval.tv_sec = 0,
+ .it_interval.tv_nsec = 1000 * 1000,
+ };
++ timer_t id;
+
+- remain = nthreads + 1; /* worker threads + this thread */
+- signal(SIGALRM, distribution_handler);
+- err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
+- if (err < 0) {
+- ksft_perror("Can't create timer");
+- return -1;
+- }
+- err = timer_settime(id, 0, &val, NULL);
+- if (err < 0) {
+- ksft_perror("Can't set timer");
+- return -1;
+- }
++ /* 1/10 seconds to ensure the leader sleeps */
++ usleep(10000);
+
+- for (i = 0; i < nthreads; i++) {
+- err = pthread_create(&threads[i], NULL, distribution_thread,
+- NULL);
+- if (err) {
+- ksft_print_msg("Can't create thread: %s (%d)\n",
+- strerror(errno), errno);
+- return -1;
+- }
+- }
++ ctd_count = 100;
++ if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id))
++ return "Can't create timer\n";
++ if (timer_settime(id, 0, &val, NULL))
++ return "Can't set timer\n";
+
+- /* Wait for all threads to receive the signal. */
+- while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
++ while (ctd_count > 0 && !ctd_failed)
++ ;
+
+- for (i = 0; i < nthreads; i++) {
+- err = pthread_join(threads[i], NULL);
+- if (err) {
+- ksft_print_msg("Can't join thread: %s (%d)\n",
+- strerror(errno), errno);
+- return -1;
+- }
+- }
++ if (timer_delete(id))
++ return "Can't delete timer\n";
+
+- if (timer_delete(id)) {
+- ksft_perror("Can't delete timer");
+- return -1;
+- }
++ return NULL;
++}
++
++/*
++ * Test that only the running thread receives the timer signal.
++ */
++static int check_timer_distribution(void)
++{
++ const char *errmsg;
+
+- ksft_test_result_pass("check_timer_distribution\n");
++ signal(SIGALRM, ctd_sighandler);
++
++ errmsg = "Can't create thread\n";
++ if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL))
++ goto err;
++
++ errmsg = "Can't join thread\n";
++ if (pthread_join(ctd_thread, (void **)&errmsg) || errmsg)
++ goto err;
++
++ if (!ctd_failed)
++ ksft_test_result_pass("check signal distribution\n");
++ else if (ksft_min_kernel_version(6, 3))
++ ksft_test_result_fail("check signal distribution\n");
++ else
++ ksft_test_result_skip("check signal distribution (old kernel)\n");
+ return 0;
++err:
++ ksft_print_msg(errmsg);
++ return -1;
+ }
+
+ int main(int argc, char **argv)
+--
+2.43.0
+
--- /dev/null
+smb-client-remove-extra-chan_count-check-in-__cifs_p.patch
+smb-client-fix-uaf-in-smb2_reconnect_server.patch
+smb3-show-beginning-time-for-per-share-stats.patch
+smb-client-guarantee-refcounted-children-from-parent.patch
+smb-client-refresh-referral-without-acquiring-refpat.patch
+drm-i915-fix-fec-pipe-a-vs.-ddi-a-mixup.patch
+drm-i915-mst-reject-fec-mst-on-icl.patch
+drm-i915-cdclk-fix-voltage_level-programming-edge-ca.patch
+drm-i915-change-intel_pipe_update_-start-end-calling.patch
+drm-i915-extract-intel_crtc_vblank_evade_scanlines.patch
+drm-i915-enable-vrr-later-during-fastsets.patch
+drm-i915-adjust-seamless_m_n-flag-behaviour.patch
+drm-i915-disable-live-m-n-updates-when-using-bigjoin.patch
+selftests-timers-convert-posix_timers-test-to-genera.patch
+selftests-timers-posix_timers-reimplement-check_time.patch
+drm-amd-display-do-not-recursively-call-manual-trigg.patch
+ceph-pass-the-mdsc-to-several-helpers.patch
+ceph-rename-_to_client-to-_to_fs_client.patch
+ceph-redirty-page-before-returning-aop_writepage_act.patch
+selftests-timers-fix-posix_timers-ksft_print_msg-war.patch
+drm-msm-dpu-populate-sspp-scaler-block-version.patch
+media-videobuf2-request-more-buffers-for-vb2_read.patch
--- /dev/null
+From f37e84058ba8a69ab48cf05d112a134ee4806189 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Apr 2024 14:13:10 -0300
+Subject: smb: client: fix UAF in smb2_reconnect_server()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit 24a9799aa8efecd0eb55a75e35f9d8e6400063aa ]
+
+The UAF bug is due to smb2_reconnect_server() accessing a session that
+is already being torn down by another thread that is executing
+__cifs_put_smb_ses(). This can happen when (a) the client has a
+connection to the server but no session or (b) another thread ends up
+setting @ses->ses_status again to something different from
+SES_EXITING.
+
+To fix this, we need to make sure to unconditionally set
+@ses->ses_status to SES_EXITING and prevent any other threads from
+setting a new status while we're still tearing it down.
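+
+The shape of the fix, sketched below as a generic userspace analogue with
+a pthread mutex rather than the real cifs structures and spinlocks
+(illustration only): the EXITING status is published and the IPC pointer
+detached while the lock is still held, and only then does the slow
+teardown run outside the lock.
+
+  #include <pthread.h>
+  #include <stdbool.h>
+  #include <stddef.h>
+
+  enum ses_status { SES_GOOD, SES_EXITING };
+
+  struct session {
+          pthread_mutex_t lock;
+          int refcount;
+          enum ses_status status;
+          void *ipc;                       /* stand-in for ses->tcon_ipc */
+  };
+
+  /* Returns true when the caller dropped the last reference and therefore
+   * owns the teardown.  Because status is set to SES_EXITING before the
+   * lock is released, no other thread can flip it back or reuse *ipc. */
+  static bool put_session(struct session *ses, void **ipc_out)
+  {
+          bool last;
+
+          pthread_mutex_lock(&ses->lock);
+          last = ses->status != SES_EXITING && --ses->refcount == 0;
+          if (last) {
+                  ses->status = SES_EXITING;   /* unconditional, under the lock */
+                  *ipc_out = ses->ipc;         /* detach while still protected  */
+                  ses->ipc = NULL;
+          }
+          pthread_mutex_unlock(&ses->lock);
+          return last;    /* slow work (logoff, freeing) happens after this */
+  }
+
+  int main(void)
+  {
+          struct session s = { .refcount = 1, .status = SES_GOOD };
+          void *ipc = NULL;
+
+          pthread_mutex_init(&s.lock, NULL);
+          s.ipc = &s;                      /* dummy non-NULL IPC pointer */
+
+          if (put_session(&s, &ipc)) {
+                  /* the slow teardown (e.g. logoff) would run here, lock-free */
+          }
+          return 0;
+  }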
+
+The following can be reproduced by adding some delay right after
+the ipc is freed in __cifs_put_smb_ses() - which will give the
+smb2_reconnect_server() worker a chance to run and then access
+@ses->ipc:
+
+kinit ...
+mount.cifs //srv/share /mnt/1 -o sec=krb5,nohandlecache,echo_interval=10
+[disconnect srv]
+ls /mnt/1 &>/dev/null
+sleep 30
+kdestroy
+[reconnect srv]
+sleep 10
+umount /mnt/1
+...
+CIFS: VFS: Verify user has a krb5 ticket and keyutils is installed
+CIFS: VFS: \\srv Send error in SessSetup = -126
+CIFS: VFS: Verify user has a krb5 ticket and keyutils is installed
+CIFS: VFS: \\srv Send error in SessSetup = -126
+general protection fault, probably for non-canonical address
+0x6b6b6b6b6b6b6b6b: 0000 [#1] PREEMPT SMP NOPTI
+CPU: 3 PID: 50 Comm: kworker/3:1 Not tainted 6.9.0-rc2 #1
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-1.fc39
+04/01/2014
+Workqueue: cifsiod smb2_reconnect_server [cifs]
+RIP: 0010:__list_del_entry_valid_or_report+0x33/0xf0
+Code: 4f 08 48 85 d2 74 42 48 85 c9 74 59 48 b8 00 01 00 00 00 00 ad
+de 48 39 c2 74 61 48 b8 22 01 00 00 00 00 74 69 <48> 8b 01 48 39 f8 75
+7b 48 8b 72 08 48 39 c6 0f 85 88 00 00 00 b8
+RSP: 0018:ffffc900001bfd70 EFLAGS: 00010a83
+RAX: dead000000000122 RBX: ffff88810da53838 RCX: 6b6b6b6b6b6b6b6b
+RDX: 6b6b6b6b6b6b6b6b RSI: ffffffffc02f6878 RDI: ffff88810da53800
+RBP: ffff88810da53800 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000001 R12: ffff88810c064000
+R13: 0000000000000001 R14: ffff88810c064000 R15: ffff8881039cc000
+FS: 0000000000000000(0000) GS:ffff888157c00000(0000)
+knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fe3728b1000 CR3: 000000010caa4000 CR4: 0000000000750ef0
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ ? die_addr+0x36/0x90
+ ? exc_general_protection+0x1c1/0x3f0
+ ? asm_exc_general_protection+0x26/0x30
+ ? __list_del_entry_valid_or_report+0x33/0xf0
+ __cifs_put_smb_ses+0x1ae/0x500 [cifs]
+ smb2_reconnect_server+0x4ed/0x710 [cifs]
+ process_one_work+0x205/0x6b0
+ worker_thread+0x191/0x360
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0xe2/0x110
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x34/0x50
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/connect.c | 83 +++++++++++++++++------------------------
+ 1 file changed, 34 insertions(+), 49 deletions(-)
+
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 556f3c31aedc7..ae35855966afd 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -237,7 +237,13 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
+- /* check if iface is still active */
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
++ spin_unlock(&ses->ses_lock);
++
+ spin_lock(&ses->chan_lock);
+ if (cifs_ses_get_chan_index(ses, server) ==
+ CIFS_INVAL_CHAN_INDEX) {
+@@ -1960,31 +1966,6 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ return rc;
+ }
+
+-/**
+- * cifs_free_ipc - helper to release the session IPC tcon
+- * @ses: smb session to unmount the IPC from
+- *
+- * Needs to be called everytime a session is destroyed.
+- *
+- * On session close, the IPC is closed and the server must release all tcons of the session.
+- * No need to send a tree disconnect here.
+- *
+- * Besides, it will make the server to not close durable and resilient files on session close, as
+- * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
+- */
+-static int
+-cifs_free_ipc(struct cifs_ses *ses)
+-{
+- struct cifs_tcon *tcon = ses->tcon_ipc;
+-
+- if (tcon == NULL)
+- return 0;
+-
+- tconInfoFree(tcon);
+- ses->tcon_ipc = NULL;
+- return 0;
+-}
+-
+ static struct cifs_ses *
+ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ {
+@@ -2016,48 +1997,52 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ void __cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+ struct TCP_Server_Info *server = ses->server;
++ struct cifs_tcon *tcon;
+ unsigned int xid;
+ size_t i;
++ bool do_logoff;
+ int rc;
+
++ spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_EXITING) {
++ cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n",
++ __func__, ses->Suid, ses->ses_count, ses->ses_status,
++ ses->tcon_ipc ? ses->tcon_ipc->tree_name : "none");
++ if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) {
+ spin_unlock(&ses->ses_lock);
++ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ }
+- spin_unlock(&ses->ses_lock);
++ /* ses_count can never go negative */
++ WARN_ON(ses->ses_count < 0);
+
+- cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
+- cifs_dbg(FYI,
+- "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
++ spin_lock(&ses->chan_lock);
++ cifs_chan_clear_need_reconnect(ses, server);
++ spin_unlock(&ses->chan_lock);
+
+- spin_lock(&cifs_tcp_ses_lock);
+- if (--ses->ses_count > 0) {
+- spin_unlock(&cifs_tcp_ses_lock);
+- return;
+- }
+- spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_GOOD)
+- ses->ses_status = SES_EXITING;
++ do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff;
++ ses->ses_status = SES_EXITING;
++ tcon = ses->tcon_ipc;
++ ses->tcon_ipc = NULL;
+ spin_unlock(&ses->ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+- /* ses_count can never go negative */
+- WARN_ON(ses->ses_count < 0);
+-
+- spin_lock(&ses->ses_lock);
+- if (ses->ses_status == SES_EXITING && server->ops->logoff) {
+- spin_unlock(&ses->ses_lock);
+- cifs_free_ipc(ses);
++ /*
++ * On session close, the IPC is closed and the server must release all
++ * tcons of the session. No need to send a tree disconnect here.
++ *
++ * Besides, it will make the server to not close durable and resilient
++ * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
++ * SMB2 LOGOFF Request.
++ */
++ tconInfoFree(tcon);
++ if (do_logoff) {
+ xid = get_xid();
+ rc = server->ops->logoff(xid, ses);
+ if (rc)
+ cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
+ __func__, rc);
+ _free_xid(xid);
+- } else {
+- spin_unlock(&ses->ses_lock);
+- cifs_free_ipc(ses);
+ }
+
+ spin_lock(&cifs_tcp_ses_lock);
+--
+2.43.0
+
--- /dev/null
+From aff941d8820ed62522542206f599d0d20efa8e55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Apr 2024 22:37:42 -0500
+Subject: smb: client: guarantee refcounted children from parent session
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit 062a7f0ff46eb57aff526897bd2bebfdb1d3046a ]
+
+Avoid potential use-after-free bugs when walking DFS referrals,
+mounting and performing DFS failover by ensuring that all children
+from parent @tcon->ses are also refcounted. They're all needed across
+the entire DFS mount. Get rid of @tcon->dfs_ses_list while we're at
+it, too.
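+
+The core idea, sketched with a generic parent-linked structure instead of
+struct cifs_ses (the names below are illustrative, not the cifs API):
+taking a reference on a node pins its whole ancestor chain, and dropping
+it walks the same chain back down.
+
+  #include <stdlib.h>
+
+  struct node {
+          struct node *parent;             /* stand-in for ses->dfs_root_ses */
+          int refcount;
+  };
+
+  static void node_get_chain(struct node *n)
+  {
+          for (; n; n = n->parent)
+                  n->refcount++;
+  }
+
+  static void node_put(struct node *n)
+  {
+          if (--n->refcount == 0)
+                  free(n);
+  }
+
+  static void node_put_chain(struct node *n)
+  {
+          struct node *next;
+
+          /* Grab the parent pointer before the put, which may free n */
+          do {
+                  next = n->parent;
+                  node_put(n);
+          } while ((n = next));
+  }
+
+  int main(void)
+  {
+          struct node *root = calloc(1, sizeof(*root));
+          struct node *child = calloc(1, sizeof(*child));
+
+          child->parent = root;
+          node_get_chain(child);           /* child and root each hold one ref */
+          node_put_chain(child);           /* frees child, then root */
+          return 0;
+  }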
+
+Cc: stable@vger.kernel.org # 6.4+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202404021527.ZlRkIxgv-lkp@intel.com/
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifsglob.h | 2 --
+ fs/smb/client/cifsproto.h | 20 +++++++--------
+ fs/smb/client/connect.c | 25 +++++++++++++++----
+ fs/smb/client/dfs.c | 51 ++++++++++++++++++---------------------
+ fs/smb/client/dfs.h | 33 ++++++++++++++++---------
+ fs/smb/client/dfs_cache.c | 11 +--------
+ fs/smb/client/misc.c | 6 -----
+ 7 files changed, 76 insertions(+), 72 deletions(-)
+
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 01d7031194671..68fd61a564089 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1253,7 +1253,6 @@ struct cifs_tcon {
+ struct cached_fids *cfids;
+ /* BB add field for back pointer to sb struct(s)? */
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+- struct list_head dfs_ses_list;
+ struct delayed_work dfs_cache_work;
+ #endif
+ struct delayed_work query_interfaces; /* query interfaces workqueue job */
+@@ -1775,7 +1774,6 @@ struct cifs_mount_ctx {
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+- struct list_head dfs_ses_list;
+ };
+
+ static inline void __free_dfs_info_param(struct dfs_info3_param *param)
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index ed257612bf0bc..1bdad33580b57 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -716,31 +716,31 @@ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+ void cifs_put_tcon_super(struct super_block *sb);
+ int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+
+-/* Put references of @ses and @ses->dfs_root_ses */
++/* Put references of @ses and its children */
+ static inline void cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+- struct cifs_ses *rses = ses->dfs_root_ses;
++ struct cifs_ses *next;
+
+- __cifs_put_smb_ses(ses);
+- if (rses)
+- __cifs_put_smb_ses(rses);
++ do {
++ next = ses->dfs_root_ses;
++ __cifs_put_smb_ses(ses);
++ } while ((ses = next));
+ }
+
+-/* Get an active reference of @ses and @ses->dfs_root_ses.
++/* Get an active reference of @ses and its children.
+ *
+ * NOTE: make sure to call this function when incrementing reference count of
+ * @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
+ * will also get its reference count incremented.
+ *
+- * cifs_put_smb_ses() will put both references, so call it when you're done.
++ * cifs_put_smb_ses() will put all references, so call it when you're done.
+ */
+ static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
+ {
+ lockdep_assert_held(&cifs_tcp_ses_lock);
+
+- ses->ses_count++;
+- if (ses->dfs_root_ses)
+- ses->dfs_root_ses->ses_count++;
++ for (; ses; ses = ses->dfs_root_ses)
++ ses->ses_count++;
+ }
+
+ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index ae35855966afd..c5705de7f9de2 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1863,6 +1863,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ ctx->sectype != ses->sectype)
+ return 0;
+
++ if (ctx->dfs_root_ses != ses->dfs_root_ses)
++ return 0;
++
+ /*
+ * If an existing session is limited to less channels than
+ * requested, it should not be reused
+@@ -2355,9 +2358,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ * need to lock before changing something in the session.
+ */
+ spin_lock(&cifs_tcp_ses_lock);
++ if (ctx->dfs_root_ses)
++ cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
+ ses->dfs_root_ses = ctx->dfs_root_ses;
+- if (ses->dfs_root_ses)
+- ses->dfs_root_ses->ses_count++;
+ list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+@@ -3301,6 +3304,9 @@ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
+ cifs_put_smb_ses(mnt_ctx->ses);
+ else if (mnt_ctx->server)
+ cifs_put_tcp_session(mnt_ctx->server, 0);
++ mnt_ctx->ses = NULL;
++ mnt_ctx->tcon = NULL;
++ mnt_ctx->server = NULL;
+ mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
+ free_xid(mnt_ctx->xid);
+ }
+@@ -3579,8 +3585,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ bool isdfs;
+ int rc;
+
+- INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
+-
+ rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ if (rc)
+ goto error;
+@@ -3611,7 +3615,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ return rc;
+
+ error:
+- dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
+ cifs_mount_put_conns(&mnt_ctx);
+ return rc;
+ }
+@@ -3626,6 +3629,18 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ goto error;
+
+ rc = cifs_mount_get_tcon(&mnt_ctx);
++ if (!rc) {
++ /*
++ * Prevent superblock from being created with any missing
++ * connections.
++ */
++ if (WARN_ON(!mnt_ctx.server))
++ rc = -EHOSTDOWN;
++ else if (WARN_ON(!mnt_ctx.ses))
++ rc = -EACCES;
++ else if (WARN_ON(!mnt_ctx.tcon))
++ rc = -ENOENT;
++ }
+ if (rc)
+ goto error;
+
+diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
+index 449c59830039b..3ec965547e3d4 100644
+--- a/fs/smb/client/dfs.c
++++ b/fs/smb/client/dfs.c
+@@ -66,33 +66,20 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
+ }
+
+ /*
+- * Track individual DFS referral servers used by new DFS mount.
+- *
+- * On success, their lifetime will be shared by final tcon (dfs_ses_list).
+- * Otherwise, they will be put by dfs_put_root_smb_sessions() in cifs_mount().
++ * Get an active reference of @ses so that next call to cifs_put_tcon() won't
++ * release it as any new DFS referrals must go through its IPC tcon.
+ */
+-static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
++static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+- struct dfs_root_ses *root_ses;
+ struct cifs_ses *ses = mnt_ctx->ses;
+
+ if (ses) {
+- root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
+- if (!root_ses)
+- return -ENOMEM;
+-
+- INIT_LIST_HEAD(&root_ses->list);
+-
+ spin_lock(&cifs_tcp_ses_lock);
+ cifs_smb_ses_inc_refcount(ses);
+ spin_unlock(&cifs_tcp_ses_lock);
+- root_ses->ses = ses;
+- list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
+ }
+- /* Select new DFS referral server so that new referrals go through it */
+ ctx->dfs_root_ses = ses;
+- return 0;
+ }
+
+ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
+@@ -185,11 +172,8 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
+ continue;
+ }
+
+- if (is_refsrv) {
+- rc = add_root_smb_session(mnt_ctx);
+- if (rc)
+- goto out;
+- }
++ if (is_refsrv)
++ add_root_smb_session(mnt_ctx);
+
+ rc = ref_walk_advance(rw);
+ if (!rc) {
+@@ -232,6 +216,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_tcon *tcon;
+ char *origin_fullpath;
++ bool new_tcon = true;
+ int rc;
+
+ origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
+@@ -239,6 +224,18 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ return PTR_ERR(origin_fullpath);
+
+ rc = dfs_referral_walk(mnt_ctx);
++ if (!rc) {
++ /*
++ * Prevent superblock from being created with any missing
++ * connections.
++ */
++ if (WARN_ON(!mnt_ctx->server))
++ rc = -EHOSTDOWN;
++ else if (WARN_ON(!mnt_ctx->ses))
++ rc = -EACCES;
++ else if (WARN_ON(!mnt_ctx->tcon))
++ rc = -ENOENT;
++ }
+ if (rc)
+ goto out;
+
+@@ -247,15 +244,14 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ if (!tcon->origin_fullpath) {
+ tcon->origin_fullpath = origin_fullpath;
+ origin_fullpath = NULL;
++ } else {
++ new_tcon = false;
+ }
+ spin_unlock(&tcon->tc_lock);
+
+- if (list_empty(&tcon->dfs_ses_list)) {
+- list_replace_init(&mnt_ctx->dfs_ses_list, &tcon->dfs_ses_list);
++ if (new_tcon) {
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
+- } else {
+- dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
+ }
+
+ out:
+@@ -298,7 +294,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ if (rc)
+ return rc;
+
+- ctx->dfs_root_ses = mnt_ctx->ses;
+ /*
+ * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
+ * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
+@@ -324,7 +319,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+
+ *isdfs = true;
+ add_root_smb_session(mnt_ctx);
+- return __dfs_mount_share(mnt_ctx);
++ rc = __dfs_mount_share(mnt_ctx);
++ dfs_put_root_smb_sessions(mnt_ctx);
++ return rc;
+ }
+
+ /* Update dfs referral path of superblock */
+diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
+index 875ab7ae57fcd..e5c4dcf837503 100644
+--- a/fs/smb/client/dfs.h
++++ b/fs/smb/client/dfs.h
+@@ -7,7 +7,9 @@
+ #define _CIFS_DFS_H
+
+ #include "cifsglob.h"
++#include "cifsproto.h"
+ #include "fs_context.h"
++#include "dfs_cache.h"
+ #include "cifs_unicode.h"
+ #include <linux/namei.h>
+
+@@ -114,11 +116,6 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
+ ref_walk_tit(rw));
+ }
+
+-struct dfs_root_ses {
+- struct list_head list;
+- struct cifs_ses *ses;
+-};
+-
+ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
+ struct smb3_fs_context *ctx);
+ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+@@ -133,20 +130,32 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
++ struct cifs_ses *rses = ctx->dfs_root_ses ?: mnt_ctx->ses;
+
+- return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
++ return dfs_cache_find(mnt_ctx->xid, rses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), path, ref, tl);
+ }
+
+-static inline void dfs_put_root_smb_sessions(struct list_head *head)
++/*
++ * cifs_get_smb_ses() already guarantees an active reference of
++ * @ses->dfs_root_ses when a new session is created, so we need to put extra
++ * references of all DFS root sessions that were used across the mount process
++ * in dfs_mount_share().
++ */
++static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
+ {
+- struct dfs_root_ses *root, *tmp;
++ const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++ struct cifs_ses *ses = ctx->dfs_root_ses;
++ struct cifs_ses *cur;
++
++ if (!ses)
++ return;
+
+- list_for_each_entry_safe(root, tmp, head, list) {
+- list_del_init(&root->list);
+- cifs_put_smb_ses(root->ses);
+- kfree(root);
++ for (cur = ses; cur; cur = cur->dfs_root_ses) {
++ if (cur->dfs_root_ses)
++ cifs_put_smb_ses(cur->dfs_root_ses);
+ }
++ cifs_put_smb_ses(ses);
+ }
+
+ #endif /* _CIFS_DFS_H */
+diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
+index 508d831fabe37..0552a864ff08f 100644
+--- a/fs/smb/client/dfs_cache.c
++++ b/fs/smb/client/dfs_cache.c
+@@ -1278,21 +1278,12 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+ void dfs_cache_refresh(struct work_struct *work)
+ {
+ struct TCP_Server_Info *server;
+- struct dfs_root_ses *rses;
+ struct cifs_tcon *tcon;
+ struct cifs_ses *ses;
+
+ tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+- ses = tcon->ses;
+- server = ses->server;
+
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
+- mutex_unlock(&server->refpath_lock);
+-
+- list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
+- ses = rses->ses;
++ for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
+ server = ses->server;
+ mutex_lock(&server->refpath_lock);
+ if (server->leaf_fullpath)
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index 51413cb00e199..74627d647818a 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -141,9 +141,6 @@ tcon_info_alloc(bool dir_leases_enabled)
+ atomic_set(&ret_buf->num_local_opens, 0);
+ atomic_set(&ret_buf->num_remote_opens, 0);
+ ret_buf->stats_from_time = ktime_get_real_seconds();
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+-#endif
+
+ return ret_buf;
+ }
+@@ -159,9 +156,6 @@ tconInfoFree(struct cifs_tcon *tcon)
+ atomic_dec(&tconInfoAllocCount);
+ kfree(tcon->nativeFileSystem);
+ kfree_sensitive(tcon->password);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
+-#endif
+ kfree(tcon->origin_fullpath);
+ kfree(tcon);
+ }
+--
+2.43.0
+
--- /dev/null
+From bb3d804a61cae527f7dcf796c0db547fe7b8a3a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Apr 2024 22:44:07 -0300
+Subject: smb: client: refresh referral without acquiring refpath_lock
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit 0a05ad21d77a188d06481c36d6016805a881bcc0 ]
+
+Avoid refreshing the DFS referral with refpath_lock acquired, as the I/O
+could block for a while due to a potentially disconnected or slow DFS
+root server, making other threads - which use the same @server but
+don't require a DFS root server - unable to make any progress.
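+
+The pattern used here, shown as a minimal userspace analogue with a
+pthread mutex instead of the real refpath_lock (illustration only, not
+cifs code): copy the path while holding the lock, drop the lock, and only
+then do the potentially slow work on the private copy.
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  static pthread_mutex_t refpath_lock = PTHREAD_MUTEX_INITIALIZER;
+  static const char *leaf_fullpath = "\\\\srv\\dfsroot\\share";
+
+  static void refresh_referral(void)
+  {
+          char *path = NULL;
+
+          /* Take a private copy while holding the lock ... */
+          pthread_mutex_lock(&refpath_lock);
+          if (leaf_fullpath)
+                  path = strdup(leaf_fullpath);
+          pthread_mutex_unlock(&refpath_lock);
+          if (!path)
+                  return;
+
+          /* ... so the slow referral I/O runs without the lock held and
+           * other users of the same server are never blocked on it. */
+          printf("refreshing referral for %s\n", path);
+
+          free(path);
+  }
+
+  int main(void)
+  {
+          refresh_referral();
+          return 0;
+  }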
+
+Cc: stable@vger.kernel.org # 6.4+
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/dfs_cache.c | 44 +++++++++++++++++++++------------------
+ 1 file changed, 24 insertions(+), 20 deletions(-)
+
+diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
+index 0552a864ff08f..11c8efecf7aa1 100644
+--- a/fs/smb/client/dfs_cache.c
++++ b/fs/smb/client/dfs_cache.c
+@@ -1172,8 +1172,8 @@ static bool is_ses_good(struct cifs_ses *ses)
+ return ret;
+ }
+
+-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
++/* Refresh dfs referral of @ses and mark it for reconnect if needed */
++static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+ {
+ struct TCP_Server_Info *server = ses->server;
+ DFS_CACHE_TGT_LIST(old_tl);
+@@ -1181,10 +1181,21 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
+ bool needs_refresh = false;
+ struct cache_entry *ce;
+ unsigned int xid;
++ char *path = NULL;
+ int rc = 0;
+
+ xid = get_xid();
+
++ mutex_lock(&server->refpath_lock);
++ if (server->leaf_fullpath) {
++ path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
++ if (!path)
++ rc = -ENOMEM;
++ }
++ mutex_unlock(&server->refpath_lock);
++ if (!path)
++ goto out;
++
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+@@ -1218,19 +1229,17 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
+ free_xid(xid);
+ dfs_cache_free_tgts(&old_tl);
+ dfs_cache_free_tgts(&new_tl);
+- return rc;
++ kfree(path);
+ }
+
+-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
++static inline void refresh_ses_referral(struct cifs_ses *ses)
+ {
+- struct TCP_Server_Info *server = tcon->ses->server;
+- struct cifs_ses *ses = tcon->ses;
++ __refresh_ses_referral(ses, false);
++}
+
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
+- mutex_unlock(&server->refpath_lock);
+- return 0;
++static inline void force_refresh_ses_referral(struct cifs_ses *ses)
++{
++ __refresh_ses_referral(ses, true);
+ }
+
+ /**
+@@ -1271,25 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+ */
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+
+- return refresh_tcon(tcon, true);
++ force_refresh_ses_referral(tcon->ses);
++ return 0;
+ }
+
+ /* Refresh all DFS referrals related to DFS tcon */
+ void dfs_cache_refresh(struct work_struct *work)
+ {
+- struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon;
+ struct cifs_ses *ses;
+
+ tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+
+- for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
+- server = ses->server;
+- mutex_lock(&server->refpath_lock);
+- if (server->leaf_fullpath)
+- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
+- mutex_unlock(&server->refpath_lock);
+- }
++ for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
++ refresh_ses_referral(ses);
+
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ atomic_read(&dfs_cache_ttl) * HZ);
+--
+2.43.0
+
--- /dev/null
+From 7bddb24a7f251bf659a181bc5149be9bfd867cbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Oct 2023 17:19:53 -0300
+Subject: smb: client: remove extra @chan_count check in __cifs_put_smb_ses()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit c37ed2d7d09869f30d291b9c6cba56ea4f0b0417 ]
+
+If @ses->chan_count <= 1, the for-loop body will not be executed, so
+there is no need to check it twice.
+
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 24a9799aa8ef ("smb: client: fix UAF in smb2_reconnect_server()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/connect.c | 23 +++++++++--------------
+ 1 file changed, 9 insertions(+), 14 deletions(-)
+
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 97776dd12b6b8..556f3c31aedc7 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2015,9 +2015,10 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+
+ void __cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+- unsigned int rc, xid;
+- unsigned int chan_count;
+ struct TCP_Server_Info *server = ses->server;
++ unsigned int xid;
++ size_t i;
++ int rc;
+
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+@@ -2063,20 +2064,14 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
+ list_del_init(&ses->smb_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+- chan_count = ses->chan_count;
+-
+ /* close any extra channels */
+- if (chan_count > 1) {
+- int i;
+-
+- for (i = 1; i < chan_count; i++) {
+- if (ses->chans[i].iface) {
+- kref_put(&ses->chans[i].iface->refcount, release_iface);
+- ses->chans[i].iface = NULL;
+- }
+- cifs_put_tcp_session(ses->chans[i].server, 0);
+- ses->chans[i].server = NULL;
++ for (i = 1; i < ses->chan_count; i++) {
++ if (ses->chans[i].iface) {
++ kref_put(&ses->chans[i].iface->refcount, release_iface);
++ ses->chans[i].iface = NULL;
+ }
++ cifs_put_tcp_session(ses->chans[i].server, 0);
++ ses->chans[i].server = NULL;
+ }
+
+ /* we now account for primary channel in iface->refcount */
+--
+2.43.0
+
--- /dev/null
+From 43cdbaaecb9fa7b942ba18cc71c5effc600b95cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jan 2024 16:15:18 -0600
+Subject: smb3: show beginning time for per share stats
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit d8392c203e84ec7daa2afecdb8f4db69bc32416a ]
+
+In analyzing problems, one missing piece of debug data is when the
+mount occurred. A related problem is that when collecting stats we don't
+know the period of time the stats cover, i.e. when this set of stats
+for the tcon started to be collected. To make debugging easier, track
+the stats begin time. Set it at mount time, and reset it to the current
+time whenever stats are reset. For example,
+
+...
+1) \\localhost\test
+SMBs: 14 since 2024-01-17 22:17:30 UTC
+Bytes read: 0 Bytes written: 0
+Open files: 0 total (local), 0 open on server
+TreeConnects: 1 total 0 failed
+TreeDisconnects: 0 total 0 failed
+...
+2) \\localhost\scratch
+SMBs: 24 since 2024-01-17 22:16:04 UTC
+Bytes read: 0 Bytes written: 0
+Open files: 0 total (local), 0 open on server
+TreeConnects: 1 total 0 failed
+TreeDisconnects: 0 total 0 failed
+...
+
+Note the time "since ... UTC" is now displayed in /proc/fs/cifs/Stats
+for each share that is mounted.
+
+Suggested-by: Shyam Prasad N <sprasad@microsoft.com>
+Reviewed-by: Bharath SM <bharathsm@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 062a7f0ff46e ("smb: client: guarantee refcounted children from parent session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_debug.c | 6 ++++--
+ fs/smb/client/cifsglob.h | 1 +
+ fs/smb/client/misc.c | 1 +
+ 3 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 6c85edb8635d0..c53d516459fc4 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -663,6 +663,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ spin_lock(&tcon->stat_lock);
+ tcon->bytes_read = 0;
+ tcon->bytes_written = 0;
++ tcon->stats_from_time = ktime_get_real_seconds();
+ spin_unlock(&tcon->stat_lock);
+ if (server->ops->clear_stats)
+ server->ops->clear_stats(tcon);
+@@ -743,8 +744,9 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+ seq_printf(m, "\n%d) %s", i, tcon->tree_name);
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+- seq_printf(m, "\nSMBs: %d",
+- atomic_read(&tcon->num_smbs_sent));
++ seq_printf(m, "\nSMBs: %d since %ptTs UTC",
++ atomic_read(&tcon->num_smbs_sent),
++ &tcon->stats_from_time);
+ if (server->ops->print_stats)
+ server->ops->print_stats(m, tcon);
+ }
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index a878b1e5aa313..01d7031194671 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1208,6 +1208,7 @@ struct cifs_tcon {
+ __u64 bytes_read;
+ __u64 bytes_written;
+ spinlock_t stat_lock; /* protects the two fields above */
++ time64_t stats_from_time;
+ FILE_SYSTEM_DEVICE_INFO fsDevInfo;
+ FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
+ FILE_SYSTEM_UNIX_INFO fsUnixInfo;
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index ef573e3f8e52a..51413cb00e199 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -140,6 +140,7 @@ tcon_info_alloc(bool dir_leases_enabled)
+ spin_lock_init(&ret_buf->stat_lock);
+ atomic_set(&ret_buf->num_local_opens, 0);
+ atomic_set(&ret_buf->num_remote_opens, 0);
++ ret_buf->stats_from_time = ktime_get_real_seconds();
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+ #endif
+--
+2.43.0
+