From 0a05ad21d77a188d06481c36d6016805a881bcc0 Mon Sep 17 00:00:00 2001
From: Paulo Alcantara <pc@manguebit.com>
Date: Mon, 1 Apr 2024 22:44:07 -0300
Subject: smb: client: refresh referral without acquiring refpath_lock

From: Paulo Alcantara <pc@manguebit.com>

commit 0a05ad21d77a188d06481c36d6016805a881bcc0 upstream.

Avoid refreshing the DFS referral with refpath_lock acquired, as the
I/O could block for a while due to a potentially disconnected or slow
DFS root server, making other threads - ones that use the same @server
but don't require a DFS root server - unable to make any progress.

Cc: stable@vger.kernel.org # 6.4+
Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 fs/smb/client/dfs_cache.c | 44 ++++++++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 20 deletions(-)

--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -1172,8 +1172,8 @@ static bool is_ses_good(struct cifs_ses
 	return ret;
 }
 
-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+/* Refresh dfs referral of @ses and mark it for reconnect if needed */
+static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
 {
 	struct TCP_Server_Info *server = ses->server;
 	DFS_CACHE_TGT_LIST(old_tl);
@@ -1181,10 +1181,21 @@ static int __refresh_tcon(const char *pa
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
+	char *path = NULL;
 	int rc = 0;
 
 	xid = get_xid();
 
+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath) {
+		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
+		if (!path)
+			rc = -ENOMEM;
+	}
+	mutex_unlock(&server->refpath_lock);
+	if (!path)
+		goto out;
+
 	down_read(&htable_rw_lock);
 	ce = lookup_cache_entry(path);
 	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
@@ -1218,19 +1229,17 @@ out:
 	free_xid(xid);
 	dfs_cache_free_tgts(&old_tl);
 	dfs_cache_free_tgts(&new_tl);
-	return rc;
+	kfree(path);
 }
 
-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+static inline void refresh_ses_referral(struct cifs_ses *ses)
 {
-	struct TCP_Server_Info *server = tcon->ses->server;
-	struct cifs_ses *ses = tcon->ses;
+	__refresh_ses_referral(ses, false);
+}
 
-	mutex_lock(&server->refpath_lock);
-	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
-	mutex_unlock(&server->refpath_lock);
-	return 0;
+static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+{
+	__refresh_ses_referral(ses, true);
 }
 
 /**
@@ -1271,25 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_
 	 */
 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
 
-	return refresh_tcon(tcon, true);
+	force_refresh_ses_referral(tcon->ses);
+	return 0;
 }
 
 /* Refresh all DFS referrals related to DFS tcon */
 void dfs_cache_refresh(struct work_struct *work)
 {
-	struct TCP_Server_Info *server;
 	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;
 
 	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
 
-	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
-		server = ses->server;
-		mutex_lock(&server->refpath_lock);
-		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
-		mutex_unlock(&server->refpath_lock);
-	}
+	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+		refresh_ses_referral(ses);
 
 	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
 			   atomic_read(&dfs_cache_ttl) * HZ);
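
The net effect of the hunks above: refpath_lock is now held only long
enough to kstrdup() a private copy of server->leaf_fullpath, the
potentially blocking referral I/O runs on that copy with the lock
dropped, and on allocation failure the function jumps to the cleanup
label instead of issuing I/O under the lock. Below is a minimal
userspace sketch of the same "snapshot under lock, block outside it"
pattern using POSIX threads; struct srv, slow_referral_io() and the
other names are illustrative stand-ins, not the kernel API.

#define _POSIX_C_SOURCE 200809L	/* for strdup() */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical stand-in for struct TCP_Server_Info. */
struct srv {
	pthread_mutex_t refpath_lock;
	char *leaf_fullpath;	/* shared; may change under the lock */
};

/* Stand-in for the blocking DFS referral request. */
static void slow_referral_io(const char *path)
{
	sleep(1);	/* simulate a slow or disconnected root server */
	printf("refreshed referral for %s\n", path);
}

static void refresh_referral(struct srv *server)
{
	char *path = NULL;

	/* Critical section: only the string copy, never the I/O. */
	pthread_mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath)
		path = strdup(server->leaf_fullpath + 1); /* skip leading '\' */
	pthread_mutex_unlock(&server->refpath_lock);

	if (!path)
		return;	/* mirrors the kernel's goto out on -ENOMEM */

	slow_referral_io(path);	/* lock already dropped */
	free(path);
}

int main(void)
{
	struct srv server = {
		.refpath_lock = PTHREAD_MUTEX_INITIALIZER,
		.leaf_fullpath = "\\\\dfsroot\\share",	/* UNC: \\dfsroot\share */
	};

	refresh_referral(&server);
	return 0;
}

Build with: cc -pthread sketch.c. Note that the kernel side copies the
path with GFP_ATOMIC, which never sleeps, presumably so the lock hold
time stays bounded by the string copy itself.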