From: Greg Kroah-Hartman Date: Sat, 24 Jun 2023 14:11:24 +0000 (+0200) Subject: 5.4-stable patches X-Git-Tag: v4.14.320~34 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=01b3b30a16f725925b0a4fa6de23c7ba45e3e593;p=thirdparty%2Fkernel%2Fstable-queue.git 5.4-stable patches added patches: cifs-clean-up-dfs-referral-cache.patch cifs-fix-potential-deadlock-when-updating-vol-in-cifs_reconnect.patch cifs-get-rid-of-kstrdup_const-d-paths.patch cifs-introduce-helpers-for-finding-tcp-connection.patch cifs-merge-is_path_valid-into-get_normalized_path.patch nilfs2-prevent-general-protection-fault-in-nilfs_clear_dirty_page.patch writeback-fix-dereferencing-null-mapping-host-on-writeback_page_template.patch --- diff --git a/queue-5.4/cifs-clean-up-dfs-referral-cache.patch b/queue-5.4/cifs-clean-up-dfs-referral-cache.patch new file mode 100644 index 00000000000..185b3473499 --- /dev/null +++ b/queue-5.4/cifs-clean-up-dfs-referral-cache.patch @@ -0,0 +1,1255 @@ +From stable-owner@vger.kernel.org Fri Jun 23 23:36:03 2023 +From: Rishabh Bhatnagar +Date: Fri, 23 Jun 2023 21:34:02 +0000 +Subject: cifs: Clean up DFS referral cache +To: , +Cc: , , , Aurelien Aptel , Steve French , Rishabh Bhatnagar +Message-ID: <20230623213406.5596-2-risbhat@amazon.com> + +From: "Paulo Alcantara (SUSE)" + +commit 185352ae6171c845951e21017b2925a6f2795904 upstream. + +Do some renaming and code cleanup. + +No functional changes. + +Signed-off-by: Paulo Alcantara (SUSE) +Reviewed-by: Aurelien Aptel +Signed-off-by: Steve French +Signed-off-by: Rishabh Bhatnagar +Signed-off-by: Greg Kroah-Hartman +--- + fs/cifs/dfs_cache.c | 565 +++++++++++++++++++++++++--------------------------- + 1 file changed, 279 insertions(+), 286 deletions(-) + +--- a/fs/cifs/dfs_cache.c ++++ b/fs/cifs/dfs_cache.c +@@ -22,60 +22,59 @@ + + #include "dfs_cache.h" + +-#define DFS_CACHE_HTABLE_SIZE 32 +-#define DFS_CACHE_MAX_ENTRIES 64 ++#define CACHE_HTABLE_SIZE 32 ++#define CACHE_MAX_ENTRIES 64 + + #define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \ + DFSREF_STORAGE_SERVER)) + +-struct dfs_cache_tgt { +- char *t_name; +- struct list_head t_list; ++struct cache_dfs_tgt { ++ char *name; ++ struct list_head list; + }; + +-struct dfs_cache_entry { +- struct hlist_node ce_hlist; +- const char *ce_path; +- int ce_ttl; +- int ce_srvtype; +- int ce_flags; +- struct timespec64 ce_etime; +- int ce_path_consumed; +- int ce_numtgts; +- struct list_head ce_tlist; +- struct dfs_cache_tgt *ce_tgthint; +- struct rcu_head ce_rcu; ++struct cache_entry { ++ struct hlist_node hlist; ++ const char *path; ++ int ttl; ++ int srvtype; ++ int flags; ++ struct timespec64 etime; ++ int path_consumed; ++ int numtgts; ++ struct list_head tlist; ++ struct cache_dfs_tgt *tgthint; ++ struct rcu_head rcu; + }; + +-static struct kmem_cache *dfs_cache_slab __read_mostly; +- +-struct dfs_cache_vol_info { +- char *vi_fullpath; +- struct smb_vol vi_vol; +- char *vi_mntdata; +- struct list_head vi_list; ++struct vol_info { ++ char *fullpath; ++ struct smb_vol smb_vol; ++ char *mntdata; ++ struct list_head list; + }; + +-struct dfs_cache { +- struct mutex dc_lock; +- struct nls_table *dc_nlsc; +- struct list_head dc_vol_list; +- int dc_ttl; +- struct delayed_work dc_refresh; +-}; ++static struct kmem_cache *cache_slab __read_mostly; ++static struct workqueue_struct *dfscache_wq __read_mostly; + +-static struct dfs_cache dfs_cache; ++static int cache_ttl; ++static struct nls_table *cache_nlsc; + + /* + * Number of entries in the cache + */ +-static size_t dfs_cache_count; ++static 
size_t cache_count; ++ ++static struct hlist_head cache_htable[CACHE_HTABLE_SIZE]; ++static DEFINE_MUTEX(list_lock); + +-static DEFINE_MUTEX(dfs_cache_list_lock); +-static struct hlist_head dfs_cache_htable[DFS_CACHE_HTABLE_SIZE]; ++static LIST_HEAD(vol_list); ++static DEFINE_MUTEX(vol_lock); + + static void refresh_cache_worker(struct work_struct *work); + ++static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker); ++ + static inline bool is_path_valid(const char *path) + { + return path && (strchr(path + 1, '\\') || strchr(path + 1, '/')); +@@ -100,42 +99,42 @@ static inline void free_normalized_path( + kfree(npath); + } + +-static inline bool cache_entry_expired(const struct dfs_cache_entry *ce) ++static inline bool cache_entry_expired(const struct cache_entry *ce) + { + struct timespec64 ts; + + ktime_get_coarse_real_ts64(&ts); +- return timespec64_compare(&ts, &ce->ce_etime) >= 0; ++ return timespec64_compare(&ts, &ce->etime) >= 0; + } + +-static inline void free_tgts(struct dfs_cache_entry *ce) ++static inline void free_tgts(struct cache_entry *ce) + { +- struct dfs_cache_tgt *t, *n; ++ struct cache_dfs_tgt *t, *n; + +- list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) { +- list_del(&t->t_list); +- kfree(t->t_name); ++ list_for_each_entry_safe(t, n, &ce->tlist, list) { ++ list_del(&t->list); ++ kfree(t->name); + kfree(t); + } + } + + static void free_cache_entry(struct rcu_head *rcu) + { +- struct dfs_cache_entry *ce = container_of(rcu, struct dfs_cache_entry, +- ce_rcu); +- kmem_cache_free(dfs_cache_slab, ce); ++ struct cache_entry *ce = container_of(rcu, struct cache_entry, rcu); ++ ++ kmem_cache_free(cache_slab, ce); + } + +-static inline void flush_cache_ent(struct dfs_cache_entry *ce) ++static inline void flush_cache_ent(struct cache_entry *ce) + { +- if (hlist_unhashed(&ce->ce_hlist)) ++ if (hlist_unhashed(&ce->hlist)) + return; + +- hlist_del_init_rcu(&ce->ce_hlist); +- kfree_const(ce->ce_path); ++ hlist_del_init_rcu(&ce->hlist); ++ kfree_const(ce->path); + free_tgts(ce); +- dfs_cache_count--; +- call_rcu(&ce->ce_rcu, free_cache_entry); ++ cache_count--; ++ call_rcu(&ce->rcu, free_cache_entry); + } + + static void flush_cache_ents(void) +@@ -143,11 +142,11 @@ static void flush_cache_ents(void) + int i; + + rcu_read_lock(); +- for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++) { +- struct hlist_head *l = &dfs_cache_htable[i]; +- struct dfs_cache_entry *ce; ++ for (i = 0; i < CACHE_HTABLE_SIZE; i++) { ++ struct hlist_head *l = &cache_htable[i]; ++ struct cache_entry *ce; + +- hlist_for_each_entry_rcu(ce, l, ce_hlist) ++ hlist_for_each_entry_rcu(ce, l, hlist) + flush_cache_ent(ce); + } + rcu_read_unlock(); +@@ -159,35 +158,35 @@ static void flush_cache_ents(void) + static int dfscache_proc_show(struct seq_file *m, void *v) + { + int bucket; +- struct dfs_cache_entry *ce; +- struct dfs_cache_tgt *t; ++ struct cache_entry *ce; ++ struct cache_dfs_tgt *t; + + seq_puts(m, "DFS cache\n---------\n"); + +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + + rcu_read_lock(); +- hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) { ++ hash_for_each_rcu(cache_htable, bucket, ce, hlist) { + seq_printf(m, + "cache entry: path=%s,type=%s,ttl=%d,etime=%ld," + "interlink=%s,path_consumed=%d,expired=%s\n", +- ce->ce_path, +- ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link", +- ce->ce_ttl, ce->ce_etime.tv_nsec, +- IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no", +- ce->ce_path_consumed, ++ ce->path, ++ ce->srvtype == DFS_TYPE_ROOT ? 
"root" : "link", ++ ce->ttl, ce->etime.tv_nsec, ++ IS_INTERLINK_SET(ce->flags) ? "yes" : "no", ++ ce->path_consumed, + cache_entry_expired(ce) ? "yes" : "no"); + +- list_for_each_entry(t, &ce->ce_tlist, t_list) { ++ list_for_each_entry(t, &ce->tlist, list) { + seq_printf(m, " %s%s\n", +- t->t_name, +- ce->ce_tgthint == t ? " (target hint)" : ""); ++ t->name, ++ ce->tgthint == t ? " (target hint)" : ""); + } + + } + rcu_read_unlock(); + +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + return 0; + } + +@@ -205,9 +204,9 @@ static ssize_t dfscache_proc_write(struc + return -EINVAL; + + cifs_dbg(FYI, "clearing dfs cache"); +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + flush_cache_ents(); +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + + return count; + } +@@ -226,25 +225,25 @@ const struct file_operations dfscache_pr + }; + + #ifdef CONFIG_CIFS_DEBUG2 +-static inline void dump_tgts(const struct dfs_cache_entry *ce) ++static inline void dump_tgts(const struct cache_entry *ce) + { +- struct dfs_cache_tgt *t; ++ struct cache_dfs_tgt *t; + + cifs_dbg(FYI, "target list:\n"); +- list_for_each_entry(t, &ce->ce_tlist, t_list) { +- cifs_dbg(FYI, " %s%s\n", t->t_name, +- ce->ce_tgthint == t ? " (target hint)" : ""); ++ list_for_each_entry(t, &ce->tlist, list) { ++ cifs_dbg(FYI, " %s%s\n", t->name, ++ ce->tgthint == t ? " (target hint)" : ""); + } + } + +-static inline void dump_ce(const struct dfs_cache_entry *ce) ++static inline void dump_ce(const struct cache_entry *ce) + { + cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld," +- "interlink=%s,path_consumed=%d,expired=%s\n", ce->ce_path, +- ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ce_ttl, +- ce->ce_etime.tv_nsec, +- IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no", +- ce->ce_path_consumed, ++ "interlink=%s,path_consumed=%d,expired=%s\n", ce->path, ++ ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl, ++ ce->etime.tv_nsec, ++ IS_INTERLINK_SET(ce->flags) ? "yes" : "no", ++ ce->path_consumed, + cache_entry_expired(ce) ? 
"yes" : "no"); + dump_tgts(ce); + } +@@ -284,25 +283,34 @@ static inline void dump_refs(const struc + */ + int dfs_cache_init(void) + { ++ int rc; + int i; + +- dfs_cache_slab = kmem_cache_create("cifs_dfs_cache", +- sizeof(struct dfs_cache_entry), 0, +- SLAB_HWCACHE_ALIGN, NULL); +- if (!dfs_cache_slab) ++ dfscache_wq = alloc_workqueue("cifs-dfscache", ++ WQ_FREEZABLE | WQ_MEM_RECLAIM, 1); ++ if (!dfscache_wq) + return -ENOMEM; + +- for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++) +- INIT_HLIST_HEAD(&dfs_cache_htable[i]); ++ cache_slab = kmem_cache_create("cifs_dfs_cache", ++ sizeof(struct cache_entry), 0, ++ SLAB_HWCACHE_ALIGN, NULL); ++ if (!cache_slab) { ++ rc = -ENOMEM; ++ goto out_destroy_wq; ++ } ++ ++ for (i = 0; i < CACHE_HTABLE_SIZE; i++) ++ INIT_HLIST_HEAD(&cache_htable[i]); + +- INIT_LIST_HEAD(&dfs_cache.dc_vol_list); +- mutex_init(&dfs_cache.dc_lock); +- INIT_DELAYED_WORK(&dfs_cache.dc_refresh, refresh_cache_worker); +- dfs_cache.dc_ttl = -1; +- dfs_cache.dc_nlsc = load_nls_default(); ++ cache_ttl = -1; ++ cache_nlsc = load_nls_default(); + + cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__); + return 0; ++ ++out_destroy_wq: ++ destroy_workqueue(dfscache_wq); ++ return rc; + } + + static inline unsigned int cache_entry_hash(const void *data, int size) +@@ -310,7 +318,7 @@ static inline unsigned int cache_entry_h + unsigned int h; + + h = jhash(data, size, 0); +- return h & (DFS_CACHE_HTABLE_SIZE - 1); ++ return h & (CACHE_HTABLE_SIZE - 1); + } + + /* Check whether second path component of @path is SYSVOL or NETLOGON */ +@@ -325,11 +333,11 @@ static inline bool is_sysvol_or_netlogon + } + + /* Return target hint of a DFS cache entry */ +-static inline char *get_tgt_name(const struct dfs_cache_entry *ce) ++static inline char *get_tgt_name(const struct cache_entry *ce) + { +- struct dfs_cache_tgt *t = ce->ce_tgthint; ++ struct cache_dfs_tgt *t = ce->tgthint; + +- return t ? t->t_name : ERR_PTR(-ENOENT); ++ return t ? t->name : ERR_PTR(-ENOENT); + } + + /* Return expire time out of a new entry's TTL */ +@@ -346,19 +354,19 @@ static inline struct timespec64 get_expi + } + + /* Allocate a new DFS target */ +-static inline struct dfs_cache_tgt *alloc_tgt(const char *name) ++static inline struct cache_dfs_tgt *alloc_tgt(const char *name) + { +- struct dfs_cache_tgt *t; ++ struct cache_dfs_tgt *t; + + t = kmalloc(sizeof(*t), GFP_KERNEL); + if (!t) + return ERR_PTR(-ENOMEM); +- t->t_name = kstrndup(name, strlen(name), GFP_KERNEL); +- if (!t->t_name) { ++ t->name = kstrndup(name, strlen(name), GFP_KERNEL); ++ if (!t->name) { + kfree(t); + return ERR_PTR(-ENOMEM); + } +- INIT_LIST_HEAD(&t->t_list); ++ INIT_LIST_HEAD(&t->list); + return t; + } + +@@ -367,63 +375,63 @@ static inline struct dfs_cache_tgt *allo + * target hint. 
+ */ + static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs, +- struct dfs_cache_entry *ce, const char *tgthint) ++ struct cache_entry *ce, const char *tgthint) + { + int i; + +- ce->ce_ttl = refs[0].ttl; +- ce->ce_etime = get_expire_time(ce->ce_ttl); +- ce->ce_srvtype = refs[0].server_type; +- ce->ce_flags = refs[0].ref_flag; +- ce->ce_path_consumed = refs[0].path_consumed; ++ ce->ttl = refs[0].ttl; ++ ce->etime = get_expire_time(ce->ttl); ++ ce->srvtype = refs[0].server_type; ++ ce->flags = refs[0].ref_flag; ++ ce->path_consumed = refs[0].path_consumed; + + for (i = 0; i < numrefs; i++) { +- struct dfs_cache_tgt *t; ++ struct cache_dfs_tgt *t; + + t = alloc_tgt(refs[i].node_name); + if (IS_ERR(t)) { + free_tgts(ce); + return PTR_ERR(t); + } +- if (tgthint && !strcasecmp(t->t_name, tgthint)) { +- list_add(&t->t_list, &ce->ce_tlist); ++ if (tgthint && !strcasecmp(t->name, tgthint)) { ++ list_add(&t->list, &ce->tlist); + tgthint = NULL; + } else { +- list_add_tail(&t->t_list, &ce->ce_tlist); ++ list_add_tail(&t->list, &ce->tlist); + } +- ce->ce_numtgts++; ++ ce->numtgts++; + } + +- ce->ce_tgthint = list_first_entry_or_null(&ce->ce_tlist, +- struct dfs_cache_tgt, t_list); ++ ce->tgthint = list_first_entry_or_null(&ce->tlist, ++ struct cache_dfs_tgt, list); + + return 0; + } + + /* Allocate a new cache entry */ +-static struct dfs_cache_entry * +-alloc_cache_entry(const char *path, const struct dfs_info3_param *refs, +- int numrefs) ++static struct cache_entry *alloc_cache_entry(const char *path, ++ const struct dfs_info3_param *refs, ++ int numrefs) + { +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + int rc; + +- ce = kmem_cache_zalloc(dfs_cache_slab, GFP_KERNEL); ++ ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL); + if (!ce) + return ERR_PTR(-ENOMEM); + +- ce->ce_path = kstrdup_const(path, GFP_KERNEL); +- if (!ce->ce_path) { +- kmem_cache_free(dfs_cache_slab, ce); ++ ce->path = kstrdup_const(path, GFP_KERNEL); ++ if (!ce->path) { ++ kmem_cache_free(cache_slab, ce); + return ERR_PTR(-ENOMEM); + } +- INIT_HLIST_NODE(&ce->ce_hlist); +- INIT_LIST_HEAD(&ce->ce_tlist); ++ INIT_HLIST_NODE(&ce->hlist); ++ INIT_LIST_HEAD(&ce->tlist); + + rc = copy_ref_data(refs, numrefs, ce, NULL); + if (rc) { +- kfree_const(ce->ce_path); +- kmem_cache_free(dfs_cache_slab, ce); ++ kfree_const(ce->path); ++ kmem_cache_free(cache_slab, ce); + ce = ERR_PTR(rc); + } + return ce; +@@ -432,13 +440,13 @@ alloc_cache_entry(const char *path, cons + static void remove_oldest_entry(void) + { + int bucket; +- struct dfs_cache_entry *ce; +- struct dfs_cache_entry *to_del = NULL; ++ struct cache_entry *ce; ++ struct cache_entry *to_del = NULL; + + rcu_read_lock(); +- hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) { +- if (!to_del || timespec64_compare(&ce->ce_etime, +- &to_del->ce_etime) < 0) ++ hash_for_each_rcu(cache_htable, bucket, ce, hlist) { ++ if (!to_del || timespec64_compare(&ce->etime, ++ &to_del->etime) < 0) + to_del = ce; + } + if (!to_del) { +@@ -453,93 +461,84 @@ out: + } + + /* Add a new DFS cache entry */ +-static inline struct dfs_cache_entry * ++static inline struct cache_entry * + add_cache_entry(unsigned int hash, const char *path, + const struct dfs_info3_param *refs, int numrefs) + { +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + + ce = alloc_cache_entry(path, refs, numrefs); + if (IS_ERR(ce)) + return ce; + +- hlist_add_head_rcu(&ce->ce_hlist, &dfs_cache_htable[hash]); ++ hlist_add_head_rcu(&ce->hlist, &cache_htable[hash]); + +- 
mutex_lock(&dfs_cache.dc_lock); +- if (dfs_cache.dc_ttl < 0) { +- dfs_cache.dc_ttl = ce->ce_ttl; +- queue_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh, +- dfs_cache.dc_ttl * HZ); ++ mutex_lock(&vol_lock); ++ if (cache_ttl < 0) { ++ cache_ttl = ce->ttl; ++ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ); + } else { +- dfs_cache.dc_ttl = min_t(int, dfs_cache.dc_ttl, ce->ce_ttl); +- mod_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh, +- dfs_cache.dc_ttl * HZ); ++ cache_ttl = min_t(int, cache_ttl, ce->ttl); ++ mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ); + } +- mutex_unlock(&dfs_cache.dc_lock); ++ mutex_unlock(&vol_lock); + + return ce; + } + +-static struct dfs_cache_entry *__find_cache_entry(unsigned int hash, +- const char *path) ++/* ++ * Find a DFS cache entry in hash table and optionally check prefix path against ++ * @path. ++ * Use whole path components in the match. ++ * Return ERR_PTR(-ENOENT) if the entry is not found. ++ */ ++static struct cache_entry *lookup_cache_entry(const char *path, ++ unsigned int *hash) + { +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; ++ unsigned int h; + bool found = false; + +- rcu_read_lock(); +- hlist_for_each_entry_rcu(ce, &dfs_cache_htable[hash], ce_hlist) { +- if (!strcasecmp(path, ce->ce_path)) { +-#ifdef CONFIG_CIFS_DEBUG2 +- char *name = get_tgt_name(ce); ++ h = cache_entry_hash(path, strlen(path)); + +- if (IS_ERR(name)) { +- rcu_read_unlock(); +- return ERR_CAST(name); +- } +- cifs_dbg(FYI, "%s: cache hit\n", __func__); +- cifs_dbg(FYI, "%s: target hint: %s\n", __func__, name); +-#endif ++ rcu_read_lock(); ++ hlist_for_each_entry_rcu(ce, &cache_htable[h], hlist) { ++ if (!strcasecmp(path, ce->path)) { + found = true; ++ dump_ce(ce); + break; + } + } + rcu_read_unlock(); +- return found ? ce : ERR_PTR(-ENOENT); +-} + +-/* +- * Find a DFS cache entry in hash table and optionally check prefix path against +- * @path. +- * Use whole path components in the match. +- * Return ERR_PTR(-ENOENT) if the entry is not found. 
+- */ +-static inline struct dfs_cache_entry *find_cache_entry(const char *path, +- unsigned int *hash) +-{ +- *hash = cache_entry_hash(path, strlen(path)); +- return __find_cache_entry(*hash, path); ++ if (!found) ++ ce = ERR_PTR(-ENOENT); ++ if (hash) ++ *hash = h; ++ ++ return ce; + } + + static inline void destroy_slab_cache(void) + { + rcu_barrier(); +- kmem_cache_destroy(dfs_cache_slab); ++ kmem_cache_destroy(cache_slab); + } + +-static inline void free_vol(struct dfs_cache_vol_info *vi) ++static inline void free_vol(struct vol_info *vi) + { +- list_del(&vi->vi_list); +- kfree(vi->vi_fullpath); +- kfree(vi->vi_mntdata); +- cifs_cleanup_volume_info_contents(&vi->vi_vol); ++ list_del(&vi->list); ++ kfree(vi->fullpath); ++ kfree(vi->mntdata); ++ cifs_cleanup_volume_info_contents(&vi->smb_vol); + kfree(vi); + } + + static inline void free_vol_list(void) + { +- struct dfs_cache_vol_info *vi, *nvi; ++ struct vol_info *vi, *nvi; + +- list_for_each_entry_safe(vi, nvi, &dfs_cache.dc_vol_list, vi_list) ++ list_for_each_entry_safe(vi, nvi, &vol_list, list) + free_vol(vi); + } + +@@ -548,40 +547,38 @@ static inline void free_vol_list(void) + */ + void dfs_cache_destroy(void) + { +- cancel_delayed_work_sync(&dfs_cache.dc_refresh); +- unload_nls(dfs_cache.dc_nlsc); ++ cancel_delayed_work_sync(&refresh_task); ++ unload_nls(cache_nlsc); + free_vol_list(); +- mutex_destroy(&dfs_cache.dc_lock); +- + flush_cache_ents(); + destroy_slab_cache(); +- mutex_destroy(&dfs_cache_list_lock); ++ destroy_workqueue(dfscache_wq); + + cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__); + } + +-static inline struct dfs_cache_entry * ++static inline struct cache_entry * + __update_cache_entry(const char *path, const struct dfs_info3_param *refs, + int numrefs) + { + int rc; + unsigned int h; +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + char *s, *th = NULL; + +- ce = find_cache_entry(path, &h); ++ ce = lookup_cache_entry(path, &h); + if (IS_ERR(ce)) + return ce; + +- if (ce->ce_tgthint) { +- s = ce->ce_tgthint->t_name; ++ if (ce->tgthint) { ++ s = ce->tgthint->name; + th = kstrndup(s, strlen(s), GFP_KERNEL); + if (!th) + return ERR_PTR(-ENOMEM); + } + + free_tgts(ce); +- ce->ce_numtgts = 0; ++ ce->numtgts = 0; + + rc = copy_ref_data(refs, numrefs, ce, th); + kfree(th); +@@ -593,10 +590,10 @@ __update_cache_entry(const char *path, c + } + + /* Update an expired cache entry by getting a new DFS referral from server */ +-static struct dfs_cache_entry * ++static struct cache_entry * + update_cache_entry(const unsigned int xid, struct cifs_ses *ses, + const struct nls_table *nls_codepage, int remap, +- const char *path, struct dfs_cache_entry *ce) ++ const char *path, struct cache_entry *ce) + { + int rc; + struct dfs_info3_param *refs = NULL; +@@ -636,20 +633,20 @@ update_cache_entry(const unsigned int xi + * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to + * handle them properly. 
+ */ +-static struct dfs_cache_entry * ++static struct cache_entry * + do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, + const struct nls_table *nls_codepage, int remap, + const char *path, bool noreq) + { + int rc; + unsigned int h; +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + struct dfs_info3_param *nrefs; + int numnrefs; + + cifs_dbg(FYI, "%s: search path: %s\n", __func__, path); + +- ce = find_cache_entry(path, &h); ++ ce = lookup_cache_entry(path, &h); + if (IS_ERR(ce)) { + cifs_dbg(FYI, "%s: cache miss\n", __func__); + /* +@@ -690,9 +687,9 @@ do_dfs_cache_find(const unsigned int xid + + cifs_dbg(FYI, "%s: new cache entry\n", __func__); + +- if (dfs_cache_count >= DFS_CACHE_MAX_ENTRIES) { ++ if (cache_count >= CACHE_MAX_ENTRIES) { + cifs_dbg(FYI, "%s: reached max cache size (%d)", +- __func__, DFS_CACHE_MAX_ENTRIES); ++ __func__, CACHE_MAX_ENTRIES); + remove_oldest_entry(); + } + ce = add_cache_entry(h, path, nrefs, numnrefs); +@@ -701,7 +698,7 @@ do_dfs_cache_find(const unsigned int xid + if (IS_ERR(ce)) + return ce; + +- dfs_cache_count++; ++ cache_count++; + } + + dump_ce(ce); +@@ -723,7 +720,7 @@ do_dfs_cache_find(const unsigned int xid + } + + /* Set up a new DFS referral from a given cache entry */ +-static int setup_ref(const char *path, const struct dfs_cache_entry *ce, ++static int setup_ref(const char *path, const struct cache_entry *ce, + struct dfs_info3_param *ref, const char *tgt) + { + int rc; +@@ -736,7 +733,7 @@ static int setup_ref(const char *path, c + if (!ref->path_name) + return -ENOMEM; + +- ref->path_consumed = ce->ce_path_consumed; ++ ref->path_consumed = ce->path_consumed; + + ref->node_name = kstrndup(tgt, strlen(tgt), GFP_KERNEL); + if (!ref->node_name) { +@@ -744,9 +741,9 @@ static int setup_ref(const char *path, c + goto err_free_path; + } + +- ref->ttl = ce->ce_ttl; +- ref->server_type = ce->ce_srvtype; +- ref->ref_flag = ce->ce_flags; ++ ref->ttl = ce->ttl; ++ ref->server_type = ce->srvtype; ++ ref->ref_flag = ce->flags; + + return 0; + +@@ -757,25 +754,25 @@ err_free_path: + } + + /* Return target list of a DFS cache entry */ +-static int get_tgt_list(const struct dfs_cache_entry *ce, ++static int get_tgt_list(const struct cache_entry *ce, + struct dfs_cache_tgt_list *tl) + { + int rc; + struct list_head *head = &tl->tl_list; +- struct dfs_cache_tgt *t; ++ struct cache_dfs_tgt *t; + struct dfs_cache_tgt_iterator *it, *nit; + + memset(tl, 0, sizeof(*tl)); + INIT_LIST_HEAD(head); + +- list_for_each_entry(t, &ce->ce_tlist, t_list) { ++ list_for_each_entry(t, &ce->tlist, list) { + it = kzalloc(sizeof(*it), GFP_KERNEL); + if (!it) { + rc = -ENOMEM; + goto err_free_it; + } + +- it->it_name = kstrndup(t->t_name, strlen(t->t_name), ++ it->it_name = kstrndup(t->name, strlen(t->name), + GFP_KERNEL); + if (!it->it_name) { + kfree(it); +@@ -783,12 +780,12 @@ static int get_tgt_list(const struct dfs + goto err_free_it; + } + +- if (ce->ce_tgthint == t) ++ if (ce->tgthint == t) + list_add(&it->it_list, head); + else + list_add_tail(&it->it_list, head); + } +- tl->tl_numtgts = ce->ce_numtgts; ++ tl->tl_numtgts = ce->numtgts; + + return 0; + +@@ -829,7 +826,7 @@ int dfs_cache_find(const unsigned int xi + { + int rc; + char *npath; +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + + if (unlikely(!is_path_valid(path))) + return -EINVAL; +@@ -838,7 +835,7 @@ int dfs_cache_find(const unsigned int xi + if (rc) + return rc; + +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + ce = do_dfs_cache_find(xid, ses, 
nls_codepage, remap, npath, false); + if (!IS_ERR(ce)) { + if (ref) +@@ -850,7 +847,7 @@ int dfs_cache_find(const unsigned int xi + } else { + rc = PTR_ERR(ce); + } +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + free_normalized_path(path, npath); + return rc; + } +@@ -876,7 +873,7 @@ int dfs_cache_noreq_find(const char *pat + { + int rc; + char *npath; +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + + if (unlikely(!is_path_valid(path))) + return -EINVAL; +@@ -885,7 +882,7 @@ int dfs_cache_noreq_find(const char *pat + if (rc) + return rc; + +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true); + if (IS_ERR(ce)) { + rc = PTR_ERR(ce); +@@ -899,7 +896,7 @@ int dfs_cache_noreq_find(const char *pat + if (!rc && tgt_list) + rc = get_tgt_list(ce, tgt_list); + out: +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + free_normalized_path(path, npath); + return rc; + } +@@ -929,8 +926,8 @@ int dfs_cache_update_tgthint(const unsig + { + int rc; + char *npath; +- struct dfs_cache_entry *ce; +- struct dfs_cache_tgt *t; ++ struct cache_entry *ce; ++ struct cache_dfs_tgt *t; + + if (unlikely(!is_path_valid(path))) + return -EINVAL; +@@ -941,7 +938,7 @@ int dfs_cache_update_tgthint(const unsig + + cifs_dbg(FYI, "%s: path: %s\n", __func__, npath); + +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false); + if (IS_ERR(ce)) { + rc = PTR_ERR(ce); +@@ -950,14 +947,14 @@ int dfs_cache_update_tgthint(const unsig + + rc = 0; + +- t = ce->ce_tgthint; ++ t = ce->tgthint; + +- if (likely(!strcasecmp(it->it_name, t->t_name))) ++ if (likely(!strcasecmp(it->it_name, t->name))) + goto out; + +- list_for_each_entry(t, &ce->ce_tlist, t_list) { +- if (!strcasecmp(t->t_name, it->it_name)) { +- ce->ce_tgthint = t; ++ list_for_each_entry(t, &ce->tlist, list) { ++ if (!strcasecmp(t->name, it->it_name)) { ++ ce->tgthint = t; + cifs_dbg(FYI, "%s: new target hint: %s\n", __func__, + it->it_name); + break; +@@ -965,7 +962,7 @@ int dfs_cache_update_tgthint(const unsig + } + + out: +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + free_normalized_path(path, npath); + return rc; + } +@@ -989,8 +986,8 @@ int dfs_cache_noreq_update_tgthint(const + { + int rc; + char *npath; +- struct dfs_cache_entry *ce; +- struct dfs_cache_tgt *t; ++ struct cache_entry *ce; ++ struct cache_dfs_tgt *t; + + if (unlikely(!is_path_valid(path)) || !it) + return -EINVAL; +@@ -1001,7 +998,7 @@ int dfs_cache_noreq_update_tgthint(const + + cifs_dbg(FYI, "%s: path: %s\n", __func__, npath); + +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + + ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true); + if (IS_ERR(ce)) { +@@ -1011,14 +1008,14 @@ int dfs_cache_noreq_update_tgthint(const + + rc = 0; + +- t = ce->ce_tgthint; ++ t = ce->tgthint; + +- if (unlikely(!strcasecmp(it->it_name, t->t_name))) ++ if (unlikely(!strcasecmp(it->it_name, t->name))) + goto out; + +- list_for_each_entry(t, &ce->ce_tlist, t_list) { +- if (!strcasecmp(t->t_name, it->it_name)) { +- ce->ce_tgthint = t; ++ list_for_each_entry(t, &ce->tlist, list) { ++ if (!strcasecmp(t->name, it->it_name)) { ++ ce->tgthint = t; + cifs_dbg(FYI, "%s: new target hint: %s\n", __func__, + it->it_name); + break; +@@ -1026,7 +1023,7 @@ int dfs_cache_noreq_update_tgthint(const + } + + out: +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + free_normalized_path(path, 
npath); + return rc; + } +@@ -1047,7 +1044,7 @@ int dfs_cache_get_tgt_referral(const cha + { + int rc; + char *npath; +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + unsigned int h; + + if (!it || !ref) +@@ -1061,9 +1058,9 @@ int dfs_cache_get_tgt_referral(const cha + + cifs_dbg(FYI, "%s: path: %s\n", __func__, npath); + +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + +- ce = find_cache_entry(npath, &h); ++ ce = lookup_cache_entry(npath, &h); + if (IS_ERR(ce)) { + rc = PTR_ERR(ce); + goto out; +@@ -1074,7 +1071,7 @@ int dfs_cache_get_tgt_referral(const cha + rc = setup_ref(path, ce, ref, it->it_name); + + out: +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + free_normalized_path(path, npath); + return rc; + } +@@ -1085,7 +1082,7 @@ static int dup_vol(struct smb_vol *vol, + + if (vol->username) { + new->username = kstrndup(vol->username, strlen(vol->username), +- GFP_KERNEL); ++ GFP_KERNEL); + if (!new->username) + return -ENOMEM; + } +@@ -1103,7 +1100,7 @@ static int dup_vol(struct smb_vol *vol, + } + if (vol->domainname) { + new->domainname = kstrndup(vol->domainname, +- strlen(vol->domainname), GFP_KERNEL); ++ strlen(vol->domainname), GFP_KERNEL); + if (!new->domainname) + goto err_free_unc; + } +@@ -1150,7 +1147,7 @@ err_free_username: + int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath) + { + int rc; +- struct dfs_cache_vol_info *vi; ++ struct vol_info *vi; + + if (!vol || !fullpath || !mntdata) + return -EINVAL; +@@ -1161,38 +1158,37 @@ int dfs_cache_add_vol(char *mntdata, str + if (!vi) + return -ENOMEM; + +- vi->vi_fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL); +- if (!vi->vi_fullpath) { ++ vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL); ++ if (!vi->fullpath) { + rc = -ENOMEM; + goto err_free_vi; + } + +- rc = dup_vol(vol, &vi->vi_vol); ++ rc = dup_vol(vol, &vi->smb_vol); + if (rc) + goto err_free_fullpath; + +- vi->vi_mntdata = mntdata; ++ vi->mntdata = mntdata; + +- mutex_lock(&dfs_cache.dc_lock); +- list_add_tail(&vi->vi_list, &dfs_cache.dc_vol_list); +- mutex_unlock(&dfs_cache.dc_lock); ++ mutex_lock(&vol_lock); ++ list_add_tail(&vi->list, &vol_list); ++ mutex_unlock(&vol_lock); + return 0; + + err_free_fullpath: +- kfree(vi->vi_fullpath); ++ kfree(vi->fullpath); + err_free_vi: + kfree(vi); + return rc; + } + +-static inline struct dfs_cache_vol_info *find_vol(const char *fullpath) ++static inline struct vol_info *find_vol(const char *fullpath) + { +- struct dfs_cache_vol_info *vi; ++ struct vol_info *vi; + +- list_for_each_entry(vi, &dfs_cache.dc_vol_list, vi_list) { +- cifs_dbg(FYI, "%s: vi->vi_fullpath: %s\n", __func__, +- vi->vi_fullpath); +- if (!strcasecmp(vi->vi_fullpath, fullpath)) ++ list_for_each_entry(vi, &vol_list, list) { ++ cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath); ++ if (!strcasecmp(vi->fullpath, fullpath)) + return vi; + } + return ERR_PTR(-ENOENT); +@@ -1209,14 +1205,14 @@ static inline struct dfs_cache_vol_info + int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server) + { + int rc; +- struct dfs_cache_vol_info *vi; ++ struct vol_info *vi; + + if (!fullpath || !server) + return -EINVAL; + + cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath); + +- mutex_lock(&dfs_cache.dc_lock); ++ mutex_lock(&vol_lock); + + vi = find_vol(fullpath); + if (IS_ERR(vi)) { +@@ -1225,12 +1221,12 @@ int dfs_cache_update_vol(const char *ful + } + + cifs_dbg(FYI, "%s: updating volume info\n", __func__); +- 
memcpy(&vi->vi_vol.dstaddr, &server->dstaddr, +- sizeof(vi->vi_vol.dstaddr)); ++ memcpy(&vi->smb_vol.dstaddr, &server->dstaddr, ++ sizeof(vi->smb_vol.dstaddr)); + rc = 0; + + out: +- mutex_unlock(&dfs_cache.dc_lock); ++ mutex_unlock(&vol_lock); + return rc; + } + +@@ -1241,18 +1237,18 @@ out: + */ + void dfs_cache_del_vol(const char *fullpath) + { +- struct dfs_cache_vol_info *vi; ++ struct vol_info *vi; + + if (!fullpath || !*fullpath) + return; + + cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath); + +- mutex_lock(&dfs_cache.dc_lock); ++ mutex_lock(&vol_lock); + vi = find_vol(fullpath); + if (!IS_ERR(vi)) + free_vol(vi); +- mutex_unlock(&dfs_cache.dc_lock); ++ mutex_unlock(&vol_lock); + } + + /* Get all tcons that are within a DFS namespace and can be refreshed */ +@@ -1280,7 +1276,7 @@ static void get_tcons(struct TCP_Server_ + spin_unlock(&cifs_tcp_ses_lock); + } + +-static inline bool is_dfs_link(const char *path) ++static bool is_dfs_link(const char *path) + { + char *s; + +@@ -1290,7 +1286,7 @@ static inline bool is_dfs_link(const cha + return !!strchr(s + 1, '\\'); + } + +-static inline char *get_dfs_root(const char *path) ++static char *get_dfs_root(const char *path) + { + char *s, *npath; + +@@ -1310,8 +1306,9 @@ static inline char *get_dfs_root(const c + } + + /* Find root SMB session out of a DFS link path */ +-static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi, +- struct cifs_tcon *tcon, const char *path) ++static struct cifs_ses *find_root_ses(struct vol_info *vi, ++ struct cifs_tcon *tcon, ++ const char *path) + { + char *rpath; + int rc; +@@ -1333,8 +1330,7 @@ static struct cifs_ses *find_root_ses(st + goto out; + } + +- mdata = cifs_compose_mount_options(vi->vi_mntdata, rpath, &ref, +- &devname); ++ mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref, &devname); + free_dfs_info_param(&ref); + + if (IS_ERR(mdata)) { +@@ -1373,14 +1369,13 @@ out: + } + + /* Refresh DFS cache entry from a given tcon */ +-static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi, +- struct cifs_tcon *tcon) ++static void refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon) + { + int rc = 0; + unsigned int xid; + char *path, *npath; + unsigned int h; +- struct dfs_cache_entry *ce; ++ struct cache_entry *ce; + struct dfs_info3_param *refs = NULL; + int numrefs = 0; + struct cifs_ses *root_ses = NULL, *ses; +@@ -1393,9 +1388,9 @@ static void do_refresh_tcon(struct dfs_c + if (rc) + goto out; + +- mutex_lock(&dfs_cache_list_lock); +- ce = find_cache_entry(npath, &h); +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); ++ ce = lookup_cache_entry(npath, &h); ++ mutex_unlock(&list_lock); + + if (IS_ERR(ce)) { + rc = PTR_ERR(ce); +@@ -1421,12 +1416,12 @@ static void do_refresh_tcon(struct dfs_c + rc = -EOPNOTSUPP; + } else { + rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs, +- &numrefs, dc->dc_nlsc, ++ &numrefs, cache_nlsc, + tcon->remap); + if (!rc) { +- mutex_lock(&dfs_cache_list_lock); ++ mutex_lock(&list_lock); + ce = __update_cache_entry(npath, refs, numrefs); +- mutex_unlock(&dfs_cache_list_lock); ++ mutex_unlock(&list_lock); + dump_refs(refs, numrefs); + free_dfs_info_array(refs, numrefs); + if (IS_ERR(ce)) +@@ -1448,30 +1443,28 @@ out: + */ + static void refresh_cache_worker(struct work_struct *work) + { +- struct dfs_cache *dc = container_of(work, struct dfs_cache, +- dc_refresh.work); +- struct dfs_cache_vol_info *vi; ++ struct vol_info *vi; + struct TCP_Server_Info *server; + LIST_HEAD(list); + struct 
cifs_tcon *tcon, *ntcon; + +- mutex_lock(&dc->dc_lock); ++ mutex_lock(&vol_lock); + +- list_for_each_entry(vi, &dc->dc_vol_list, vi_list) { +- server = cifs_find_tcp_session(&vi->vi_vol); ++ list_for_each_entry(vi, &vol_list, list) { ++ server = cifs_find_tcp_session(&vi->smb_vol); + if (IS_ERR_OR_NULL(server)) + continue; + if (server->tcpStatus != CifsGood) + goto next; + get_tcons(server, &list); + list_for_each_entry_safe(tcon, ntcon, &list, ulist) { +- do_refresh_tcon(dc, vi, tcon); ++ refresh_tcon(vi, tcon); + list_del_init(&tcon->ulist); + cifs_put_tcon(tcon); + } + next: + cifs_put_tcp_session(server, 0); + } +- queue_delayed_work(cifsiod_wq, &dc->dc_refresh, dc->dc_ttl * HZ); +- mutex_unlock(&dc->dc_lock); ++ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ); ++ mutex_unlock(&vol_lock); + } diff --git a/queue-5.4/cifs-fix-potential-deadlock-when-updating-vol-in-cifs_reconnect.patch b/queue-5.4/cifs-fix-potential-deadlock-when-updating-vol-in-cifs_reconnect.patch new file mode 100644 index 00000000000..72fc421ef47 --- /dev/null +++ b/queue-5.4/cifs-fix-potential-deadlock-when-updating-vol-in-cifs_reconnect.patch @@ -0,0 +1,270 @@ +From stable-owner@vger.kernel.org Fri Jun 23 23:35:43 2023 +From: Rishabh Bhatnagar +Date: Fri, 23 Jun 2023 21:34:06 +0000 +Subject: cifs: Fix potential deadlock when updating vol in cifs_reconnect() +To: , +Cc: , , , Steve French , "Rishabh Bhatnagar" +Message-ID: <20230623213406.5596-6-risbhat@amazon.com> + +From: "Paulo Alcantara (SUSE)" + +commit 06d57378bcc9b2c33640945174842115593795d1 upstream. + +We can't acquire volume lock while refreshing the DFS cache because +cifs_reconnect() may call dfs_cache_update_vol() while we are walking +through the volume list. + +To prevent that, make vol_info refcounted, create a temp list with all +volumes eligible for refreshing, and then use it without any locks +held. + +Besides, replace vol_lock with a spinlock and protect cache_ttl from +concurrent accesses or changes. 
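+
+A minimal sketch of the lifetime rule this introduces, condensed from the
+diff below (error paths elided):
+
+	spin_lock(&vol_list_lock);
+	vi = find_vol(fullpath);
+	if (IS_ERR(vi)) {
+		spin_unlock(&vol_list_lock);
+		return PTR_ERR(vi);
+	}
+	kref_get(&vi->refcnt);		/* pin the entry under the lock */
+	spin_unlock(&vol_list_lock);
+
+	/* ... use vi with no list lock held; cifs_reconnect() may run ... */
+
+	kref_put(&vi->refcnt, vol_release);	/* unlinks and frees on last put */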
+ +Signed-off-by: Paulo Alcantara (SUSE) +Signed-off-by: Steve French +Signed-off-by: Rishabh Bhatnagar +Signed-off-by: Greg Kroah-Hartman +--- + fs/cifs/dfs_cache.c | 109 ++++++++++++++++++++++++++++++++++++---------------- + 1 file changed, 77 insertions(+), 32 deletions(-) + +--- a/fs/cifs/dfs_cache.c ++++ b/fs/cifs/dfs_cache.c +@@ -49,15 +49,20 @@ struct cache_entry { + + struct vol_info { + char *fullpath; ++ spinlock_t smb_vol_lock; + struct smb_vol smb_vol; + char *mntdata; + struct list_head list; ++ struct list_head rlist; ++ struct kref refcnt; + }; + + static struct kmem_cache *cache_slab __read_mostly; + static struct workqueue_struct *dfscache_wq __read_mostly; + + static int cache_ttl; ++static DEFINE_SPINLOCK(cache_ttl_lock); ++ + static struct nls_table *cache_nlsc; + + /* +@@ -69,7 +74,7 @@ static struct hlist_head cache_htable[CA + static DEFINE_MUTEX(list_lock); + + static LIST_HEAD(vol_list); +-static DEFINE_MUTEX(vol_lock); ++static DEFINE_SPINLOCK(vol_list_lock); + + static void refresh_cache_worker(struct work_struct *work); + +@@ -300,7 +305,6 @@ int dfs_cache_init(void) + for (i = 0; i < CACHE_HTABLE_SIZE; i++) + INIT_HLIST_HEAD(&cache_htable[i]); + +- cache_ttl = -1; + cache_nlsc = load_nls_default(); + + cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__); +@@ -471,15 +475,15 @@ add_cache_entry(unsigned int hash, const + + hlist_add_head_rcu(&ce->hlist, &cache_htable[hash]); + +- mutex_lock(&vol_lock); +- if (cache_ttl < 0) { ++ spin_lock(&cache_ttl_lock); ++ if (!cache_ttl) { + cache_ttl = ce->ttl; + queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ); + } else { + cache_ttl = min_t(int, cache_ttl, ce->ttl); + mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ); + } +- mutex_unlock(&vol_lock); ++ spin_unlock(&cache_ttl_lock); + + return ce; + } +@@ -523,21 +527,32 @@ static inline void destroy_slab_cache(vo + kmem_cache_destroy(cache_slab); + } + +-static inline void free_vol(struct vol_info *vi) ++static void __vol_release(struct vol_info *vi) + { +- list_del(&vi->list); + kfree(vi->fullpath); + kfree(vi->mntdata); + cifs_cleanup_volume_info_contents(&vi->smb_vol); + kfree(vi); + } + ++static void vol_release(struct kref *kref) ++{ ++ struct vol_info *vi = container_of(kref, struct vol_info, refcnt); ++ ++ spin_lock(&vol_list_lock); ++ list_del(&vi->list); ++ spin_unlock(&vol_list_lock); ++ __vol_release(vi); ++} ++ + static inline void free_vol_list(void) + { + struct vol_info *vi, *nvi; + +- list_for_each_entry_safe(vi, nvi, &vol_list, list) +- free_vol(vi); ++ list_for_each_entry_safe(vi, nvi, &vol_list, list) { ++ list_del_init(&vi->list); ++ __vol_release(vi); ++ } + } + + /** +@@ -1156,10 +1171,13 @@ int dfs_cache_add_vol(char *mntdata, str + goto err_free_fullpath; + + vi->mntdata = mntdata; ++ spin_lock_init(&vi->smb_vol_lock); ++ kref_init(&vi->refcnt); + +- mutex_lock(&vol_lock); ++ spin_lock(&vol_list_lock); + list_add_tail(&vi->list, &vol_list); +- mutex_unlock(&vol_lock); ++ spin_unlock(&vol_list_lock); ++ + return 0; + + err_free_fullpath: +@@ -1169,7 +1187,8 @@ err_free_vi: + return rc; + } + +-static inline struct vol_info *find_vol(const char *fullpath) ++/* Must be called with vol_list_lock held */ ++static struct vol_info *find_vol(const char *fullpath) + { + struct vol_info *vi; + +@@ -1191,7 +1210,6 @@ static inline struct vol_info *find_vol( + */ + int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server) + { +- int rc; + struct vol_info *vi; + + if (!fullpath || !server) +@@ -1199,22 
+1217,24 @@ int dfs_cache_update_vol(const char *ful + + cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath); + +- mutex_lock(&vol_lock); +- ++ spin_lock(&vol_list_lock); + vi = find_vol(fullpath); + if (IS_ERR(vi)) { +- rc = PTR_ERR(vi); +- goto out; ++ spin_unlock(&vol_list_lock); ++ return PTR_ERR(vi); + } ++ kref_get(&vi->refcnt); ++ spin_unlock(&vol_list_lock); + + cifs_dbg(FYI, "%s: updating volume info\n", __func__); ++ spin_lock(&vi->smb_vol_lock); + memcpy(&vi->smb_vol.dstaddr, &server->dstaddr, + sizeof(vi->smb_vol.dstaddr)); +- rc = 0; ++ spin_unlock(&vi->smb_vol_lock); + +-out: +- mutex_unlock(&vol_lock); +- return rc; ++ kref_put(&vi->refcnt, vol_release); ++ ++ return 0; + } + + /** +@@ -1231,11 +1251,11 @@ void dfs_cache_del_vol(const char *fullp + + cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath); + +- mutex_lock(&vol_lock); ++ spin_lock(&vol_list_lock); + vi = find_vol(fullpath); +- if (!IS_ERR(vi)) +- free_vol(vi); +- mutex_unlock(&vol_lock); ++ spin_unlock(&vol_list_lock); ++ ++ kref_put(&vi->refcnt, vol_release); + } + + /* Get all tcons that are within a DFS namespace and can be refreshed */ +@@ -1449,27 +1469,52 @@ out: + */ + static void refresh_cache_worker(struct work_struct *work) + { +- struct vol_info *vi; ++ struct vol_info *vi, *nvi; + struct TCP_Server_Info *server; +- LIST_HEAD(list); ++ LIST_HEAD(vols); ++ LIST_HEAD(tcons); + struct cifs_tcon *tcon, *ntcon; + +- mutex_lock(&vol_lock); +- ++ /* ++ * Find SMB volumes that are eligible (server->tcpStatus == CifsGood) ++ * for refreshing. ++ */ ++ spin_lock(&vol_list_lock); + list_for_each_entry(vi, &vol_list, list) { + server = get_tcp_server(&vi->smb_vol); + if (!server) + continue; + +- get_tcons(server, &list); +- list_for_each_entry_safe(tcon, ntcon, &list, ulist) { ++ kref_get(&vi->refcnt); ++ list_add_tail(&vi->rlist, &vols); ++ put_tcp_server(server); ++ } ++ spin_unlock(&vol_list_lock); ++ ++ /* Walk through all TCONs and refresh any expired cache entry */ ++ list_for_each_entry_safe(vi, nvi, &vols, rlist) { ++ spin_lock(&vi->smb_vol_lock); ++ server = get_tcp_server(&vi->smb_vol); ++ spin_unlock(&vi->smb_vol_lock); ++ ++ if (!server) ++ goto next_vol; ++ ++ get_tcons(server, &tcons); ++ list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) { + refresh_tcon(vi, tcon); + list_del_init(&tcon->ulist); + cifs_put_tcon(tcon); + } + + put_tcp_server(server); ++ ++next_vol: ++ list_del_init(&vi->rlist); ++ kref_put(&vi->refcnt, vol_release); + } ++ ++ spin_lock(&cache_ttl_lock); + queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ); +- mutex_unlock(&vol_lock); ++ spin_unlock(&cache_ttl_lock); + } diff --git a/queue-5.4/cifs-get-rid-of-kstrdup_const-d-paths.patch b/queue-5.4/cifs-get-rid-of-kstrdup_const-d-paths.patch new file mode 100644 index 00000000000..8fb98822da7 --- /dev/null +++ b/queue-5.4/cifs-get-rid-of-kstrdup_const-d-paths.patch @@ -0,0 +1,52 @@ +From stable-owner@vger.kernel.org Fri Jun 23 23:35:44 2023 +From: Rishabh Bhatnagar +Date: Fri, 23 Jun 2023 21:34:03 +0000 +Subject: cifs: Get rid of kstrdup_const()'d paths +To: , +Cc: , , , Aurelien Aptel , Steve French , Rishabh Bhatnagar +Message-ID: <20230623213406.5596-3-risbhat@amazon.com> + +From: "Paulo Alcantara (SUSE)" + +commit 199c6bdfb04b71d88a7765e08285885fbca60df4 upstream. + +The DFS cache API is mostly used with heap allocated strings. 
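+
+For reference, kstrdup_const() only avoids the copy when the source
+string lives in .rodata; for a heap-allocated path it falls back to a
+plain duplication, so kstrndup()/kfree() are used directly.  Roughly,
+with heap_path standing for any dynamically built path:
+
+	p1 = kstrdup_const("literal", GFP_KERNEL);	/* no copy, returns "literal" */
+	p2 = kstrdup_const(heap_path, GFP_KERNEL);	/* allocates and copies anyway */
+	kfree_const(p1);				/* no-op for .rodata */
+	kfree_const(p2);				/* real kfree() */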
+ +Signed-off-by: Paulo Alcantara (SUSE) +Reviewed-by: Aurelien Aptel +Signed-off-by: Steve French +Signed-off-by: Rishabh Bhatnagar +Signed-off-by: Greg Kroah-Hartman +--- + fs/cifs/dfs_cache.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/fs/cifs/dfs_cache.c ++++ b/fs/cifs/dfs_cache.c +@@ -131,7 +131,7 @@ static inline void flush_cache_ent(struc + return; + + hlist_del_init_rcu(&ce->hlist); +- kfree_const(ce->path); ++ kfree(ce->path); + free_tgts(ce); + cache_count--; + call_rcu(&ce->rcu, free_cache_entry); +@@ -420,7 +420,7 @@ static struct cache_entry *alloc_cache_e + if (!ce) + return ERR_PTR(-ENOMEM); + +- ce->path = kstrdup_const(path, GFP_KERNEL); ++ ce->path = kstrndup(path, strlen(path), GFP_KERNEL); + if (!ce->path) { + kmem_cache_free(cache_slab, ce); + return ERR_PTR(-ENOMEM); +@@ -430,7 +430,7 @@ static struct cache_entry *alloc_cache_e + + rc = copy_ref_data(refs, numrefs, ce, NULL); + if (rc) { +- kfree_const(ce->path); ++ kfree(ce->path); + kmem_cache_free(cache_slab, ce); + ce = ERR_PTR(rc); + } diff --git a/queue-5.4/cifs-introduce-helpers-for-finding-tcp-connection.patch b/queue-5.4/cifs-introduce-helpers-for-finding-tcp-connection.patch new file mode 100644 index 00000000000..41bc020e0e4 --- /dev/null +++ b/queue-5.4/cifs-introduce-helpers-for-finding-tcp-connection.patch @@ -0,0 +1,98 @@ +From stable-owner@vger.kernel.org Fri Jun 23 23:35:43 2023 +From: Rishabh Bhatnagar +Date: Fri, 23 Jun 2023 21:34:04 +0000 +Subject: cifs: Introduce helpers for finding TCP connection +To: , +Cc: , , , Aurelien Aptel , Steve French , Rishabh Bhatnagar +Message-ID: <20230623213406.5596-4-risbhat@amazon.com> + +From: "Paulo Alcantara (SUSE)" + +commit 345c1a4a9e09dc5842b7bbb6728a77910db69c52 upstream. + +Add helpers for finding TCP connections that are good candidates for +being used by DFS refresh worker. 
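+
+Callers can then collapse the find/status-check/put sequence into a
+single NULL test; sketch of the caller side as used by the refresh
+worker in the diff below:
+
+	server = get_tcp_server(&vi->smb_vol);
+	if (!server)
+		continue;	/* no session, or not CifsGood yet */
+
+	get_tcons(server, &list);
+	/* ... refresh each tcon ... */
+
+	put_tcp_server(server);	/* drops the reference taken by the lookup */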
+ +Signed-off-by: Paulo Alcantara (SUSE) +Reviewed-by: Aurelien Aptel +Signed-off-by: Steve French +Signed-off-by: Rishabh Bhatnagar +Signed-off-by: Greg Kroah-Hartman +--- + fs/cifs/dfs_cache.c | 44 +++++++++++++++++++++++++++++++------------- + 1 file changed, 31 insertions(+), 13 deletions(-) + +--- a/fs/cifs/dfs_cache.c ++++ b/fs/cifs/dfs_cache.c +@@ -1305,6 +1305,30 @@ static char *get_dfs_root(const char *pa + return npath; + } + ++static inline void put_tcp_server(struct TCP_Server_Info *server) ++{ ++ cifs_put_tcp_session(server, 0); ++} ++ ++static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol) ++{ ++ struct TCP_Server_Info *server; ++ ++ server = cifs_find_tcp_session(vol); ++ if (IS_ERR_OR_NULL(server)) ++ return NULL; ++ ++ spin_lock(&GlobalMid_Lock); ++ if (server->tcpStatus != CifsGood) { ++ spin_unlock(&GlobalMid_Lock); ++ put_tcp_server(server); ++ return NULL; ++ } ++ spin_unlock(&GlobalMid_Lock); ++ ++ return server; ++} ++ + /* Find root SMB session out of a DFS link path */ + static struct cifs_ses *find_root_ses(struct vol_info *vi, + struct cifs_tcon *tcon, +@@ -1347,13 +1371,8 @@ static struct cifs_ses *find_root_ses(st + goto out; + } + +- server = cifs_find_tcp_session(&vol); +- if (IS_ERR_OR_NULL(server)) { +- ses = ERR_PTR(-EHOSTDOWN); +- goto out; +- } +- if (server->tcpStatus != CifsGood) { +- cifs_put_tcp_session(server, 0); ++ server = get_tcp_server(&vol); ++ if (!server) { + ses = ERR_PTR(-EHOSTDOWN); + goto out; + } +@@ -1451,19 +1470,18 @@ static void refresh_cache_worker(struct + mutex_lock(&vol_lock); + + list_for_each_entry(vi, &vol_list, list) { +- server = cifs_find_tcp_session(&vi->smb_vol); +- if (IS_ERR_OR_NULL(server)) ++ server = get_tcp_server(&vi->smb_vol); ++ if (!server) + continue; +- if (server->tcpStatus != CifsGood) +- goto next; ++ + get_tcons(server, &list); + list_for_each_entry_safe(tcon, ntcon, &list, ulist) { + refresh_tcon(vi, tcon); + list_del_init(&tcon->ulist); + cifs_put_tcon(tcon); + } +-next: +- cifs_put_tcp_session(server, 0); ++ ++ put_tcp_server(server); + } + queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ); + mutex_unlock(&vol_lock); diff --git a/queue-5.4/cifs-merge-is_path_valid-into-get_normalized_path.patch b/queue-5.4/cifs-merge-is_path_valid-into-get_normalized_path.patch new file mode 100644 index 00000000000..42c21a863c7 --- /dev/null +++ b/queue-5.4/cifs-merge-is_path_valid-into-get_normalized_path.patch @@ -0,0 +1,90 @@ +From stable-owner@vger.kernel.org Fri Jun 23 23:35:59 2023 +From: Rishabh Bhatnagar +Date: Fri, 23 Jun 2023 21:34:05 +0000 +Subject: cifs: Merge is_path_valid() into get_normalized_path() +To: , +Cc: , , , Aurelien Aptel , Steve French , Rishabh Bhatnagar +Message-ID: <20230623213406.5596-5-risbhat@amazon.com> + +From: "Paulo Alcantara (SUSE)" + +commit ff2f7fc08268f266372c30a815349749e8499eb5 upstream. + +Just do the trivial path validation in get_normalized_path(). 
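+
+The merged check rejects NULL, too-short, or relative paths up front and
+otherwise normalizes '/' separators to '\'; illustrative calls:
+
+	rc = get_normalized_path("\\srv\\share", &npath);	/* 0, npath == path */
+	rc = get_normalized_path("/srv/share", &npath);		/* 0, npath is a converted copy */
+	rc = get_normalized_path("srv", &npath);		/* -EINVAL, no leading separator */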
+ +Signed-off-by: Paulo Alcantara (SUSE) +Reviewed-by: Aurelien Aptel +Signed-off-by: Steve French +Signed-off-by: Rishabh Bhatnagar +Signed-off-by: Greg Kroah-Hartman +--- + fs/cifs/dfs_cache.c | 21 ++++----------------- + 1 file changed, 4 insertions(+), 17 deletions(-) + +--- a/fs/cifs/dfs_cache.c ++++ b/fs/cifs/dfs_cache.c +@@ -75,13 +75,11 @@ static void refresh_cache_worker(struct + + static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker); + +-static inline bool is_path_valid(const char *path) ++static int get_normalized_path(const char *path, char **npath) + { +- return path && (strchr(path + 1, '\\') || strchr(path + 1, '/')); +-} ++ if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/')) ++ return -EINVAL; + +-static inline int get_normalized_path(const char *path, char **npath) +-{ + if (*path == '\\') { + *npath = (char *)path; + } else { +@@ -828,9 +826,6 @@ int dfs_cache_find(const unsigned int xi + char *npath; + struct cache_entry *ce; + +- if (unlikely(!is_path_valid(path))) +- return -EINVAL; +- + rc = get_normalized_path(path, &npath); + if (rc) + return rc; +@@ -875,9 +870,6 @@ int dfs_cache_noreq_find(const char *pat + char *npath; + struct cache_entry *ce; + +- if (unlikely(!is_path_valid(path))) +- return -EINVAL; +- + rc = get_normalized_path(path, &npath); + if (rc) + return rc; +@@ -929,9 +921,6 @@ int dfs_cache_update_tgthint(const unsig + struct cache_entry *ce; + struct cache_dfs_tgt *t; + +- if (unlikely(!is_path_valid(path))) +- return -EINVAL; +- + rc = get_normalized_path(path, &npath); + if (rc) + return rc; +@@ -989,7 +978,7 @@ int dfs_cache_noreq_update_tgthint(const + struct cache_entry *ce; + struct cache_dfs_tgt *t; + +- if (unlikely(!is_path_valid(path)) || !it) ++ if (!it) + return -EINVAL; + + rc = get_normalized_path(path, &npath); +@@ -1049,8 +1038,6 @@ int dfs_cache_get_tgt_referral(const cha + + if (!it || !ref) + return -EINVAL; +- if (unlikely(!is_path_valid(path))) +- return -EINVAL; + + rc = get_normalized_path(path, &npath); + if (rc) diff --git a/queue-5.4/nilfs2-prevent-general-protection-fault-in-nilfs_clear_dirty_page.patch b/queue-5.4/nilfs2-prevent-general-protection-fault-in-nilfs_clear_dirty_page.patch new file mode 100644 index 00000000000..5eeeaf9c79c --- /dev/null +++ b/queue-5.4/nilfs2-prevent-general-protection-fault-in-nilfs_clear_dirty_page.patch @@ -0,0 +1,56 @@ +From 782e53d0c14420858dbf0f8f797973c150d3b6d7 Mon Sep 17 00:00:00 2001 +From: Ryusuke Konishi +Date: Mon, 12 Jun 2023 11:14:56 +0900 +Subject: nilfs2: prevent general protection fault in nilfs_clear_dirty_page() + +From: Ryusuke Konishi + +commit 782e53d0c14420858dbf0f8f797973c150d3b6d7 upstream. + +In a syzbot stress test that deliberately causes file system errors on +nilfs2 with a corrupted disk image, it has been reported that +nilfs_clear_dirty_page() called from nilfs_clear_dirty_pages() can cause a +general protection fault. + +In nilfs_clear_dirty_pages(), when looking up dirty pages from the page +cache and calling nilfs_clear_dirty_page() for each dirty page/folio +retrieved, the back reference from the argument page to "mapping" may have +been changed to NULL (and possibly others). It is necessary to check this +after locking the page/folio. + +So, fix this issue by not calling nilfs_clear_dirty_page() on a page/folio +after locking it in nilfs_clear_dirty_pages() if the back reference +"mapping" from the page/folio is different from the "mapping" that held +the page/folio just before. 
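+
+This is the standard guard against the lookup-vs-truncate race: only the
+page lock pins the page<->mapping association, so the back reference has
+to be re-read once the lock is held.  In outline (matching the hunk
+below):
+
+	lock_page(page);
+	if (likely(page->mapping == mapping))	/* still owned by @mapping */
+		nilfs_clear_dirty_page(page, silent);
+	unlock_page(page);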
+ +Link: https://lkml.kernel.org/r/20230612021456.3682-1-konishi.ryusuke@gmail.com +Signed-off-by: Ryusuke Konishi +Reported-by: syzbot+53369d11851d8f26735c@syzkaller.appspotmail.com +Closes: https://lkml.kernel.org/r/000000000000da4f6b05eb9bf593@google.com +Tested-by: Ryusuke Konishi +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Greg Kroah-Hartman +--- + fs/nilfs2/page.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +--- a/fs/nilfs2/page.c ++++ b/fs/nilfs2/page.c +@@ -370,7 +370,15 @@ void nilfs_clear_dirty_pages(struct addr + struct page *page = pvec.pages[i]; + + lock_page(page); +- nilfs_clear_dirty_page(page, silent); ++ ++ /* ++ * This page may have been removed from the address ++ * space by truncation or invalidation when the lock ++ * was acquired. Skip processing in that case. ++ */ ++ if (likely(page->mapping == mapping)) ++ nilfs_clear_dirty_page(page, silent); ++ + unlock_page(page); + } + pagevec_release(&pvec); diff --git a/queue-5.4/series b/queue-5.4/series index d469c630673..d46b27cd867 100644 --- a/queue-5.4/series +++ b/queue-5.4/series @@ -14,3 +14,10 @@ pci-hv-fix-a-race-condition-bug-in-hv_pci_query_relations.patch cgroup-do-not-corrupt-task-iteration-when-rebinding-subsystem.patch mmc-meson-gx-remove-redundant-mmc_request_done-call-from-irq-context.patch ip_tunnels-allow-vxlan-geneve-to-inherit-tos-ttl-from-vlan.patch +writeback-fix-dereferencing-null-mapping-host-on-writeback_page_template.patch +nilfs2-prevent-general-protection-fault-in-nilfs_clear_dirty_page.patch +cifs-clean-up-dfs-referral-cache.patch +cifs-get-rid-of-kstrdup_const-d-paths.patch +cifs-introduce-helpers-for-finding-tcp-connection.patch +cifs-merge-is_path_valid-into-get_normalized_path.patch +cifs-fix-potential-deadlock-when-updating-vol-in-cifs_reconnect.patch diff --git a/queue-5.4/writeback-fix-dereferencing-null-mapping-host-on-writeback_page_template.patch b/queue-5.4/writeback-fix-dereferencing-null-mapping-host-on-writeback_page_template.patch new file mode 100644 index 00000000000..e17851a2b82 --- /dev/null +++ b/queue-5.4/writeback-fix-dereferencing-null-mapping-host-on-writeback_page_template.patch @@ -0,0 +1,99 @@ +From 54abe19e00cfcc5a72773d15cd00ed19ab763439 Mon Sep 17 00:00:00 2001 +From: Rafael Aquini +Date: Tue, 6 Jun 2023 19:36:13 -0400 +Subject: writeback: fix dereferencing NULL mapping->host on writeback_page_template + +From: Rafael Aquini + +commit 54abe19e00cfcc5a72773d15cd00ed19ab763439 upstream. 
+ +When commit 19343b5bdd16 ("mm/page-writeback: introduce tracepoint for +wait_on_page_writeback()") repurposed the writeback_dirty_page trace event +as a template to create its new wait_on_page_writeback trace event, it +ended up opening a window to NULL pointer dereference crashes due to the +(infrequent) occurrence of a race where an access to a page in the +swap-cache happens concurrently with the moment this page is being written +to disk and the tracepoint is enabled: + + BUG: kernel NULL pointer dereference, address: 0000000000000040 + #PF: supervisor read access in kernel mode + #PF: error_code(0x0000) - not-present page + PGD 800000010ec0a067 P4D 800000010ec0a067 PUD 102353067 PMD 0 + Oops: 0000 [#1] PREEMPT SMP PTI + CPU: 1 PID: 1320 Comm: shmem-worker Kdump: loaded Not tainted 6.4.0-rc5+ #13 + Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS edk2-20230301gitf80f052277c8-1.fc37 03/01/2023 + RIP: 0010:trace_event_raw_event_writeback_folio_template+0x76/0xf0 + Code: 4d 85 e4 74 5c 49 8b 3c 24 e8 06 98 ee ff 48 89 c7 e8 9e 8b ee ff ba 20 00 00 00 48 89 ef 48 89 c6 e8 fe d4 1a 00 49 8b 04 24 <48> 8b 40 40 48 89 43 28 49 8b 45 20 48 89 e7 48 89 43 30 e8 a2 4d + RSP: 0000:ffffaad580b6fb60 EFLAGS: 00010246 + RAX: 0000000000000000 RBX: ffff90e38035c01c RCX: 0000000000000000 + RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff90e38035c044 + RBP: ffff90e38035c024 R08: 0000000000000002 R09: 0000000000000006 + R10: ffff90e38035c02e R11: 0000000000000020 R12: ffff90e380bac000 + R13: ffffe3a7456d9200 R14: 0000000000001b81 R15: ffffe3a7456d9200 + FS: 00007f2e4e8a15c0(0000) GS:ffff90e3fbc80000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 0000000000000040 CR3: 00000001150c6003 CR4: 0000000000170ee0 + Call Trace: + + ? __die+0x20/0x70 + ? page_fault_oops+0x76/0x170 + ? kernelmode_fixup_or_oops+0x84/0x110 + ? exc_page_fault+0x65/0x150 + ? asm_exc_page_fault+0x22/0x30 + ? trace_event_raw_event_writeback_folio_template+0x76/0xf0 + folio_wait_writeback+0x6b/0x80 + shmem_swapin_folio+0x24a/0x500 + ? filemap_get_entry+0xe3/0x140 + shmem_get_folio_gfp+0x36e/0x7c0 + ? find_busiest_group+0x43/0x1a0 + shmem_fault+0x76/0x2a0 + ? __update_load_avg_cfs_rq+0x281/0x2f0 + __do_fault+0x33/0x130 + do_read_fault+0x118/0x160 + do_pte_missing+0x1ed/0x2a0 + __handle_mm_fault+0x566/0x630 + handle_mm_fault+0x91/0x210 + do_user_addr_fault+0x22c/0x740 + exc_page_fault+0x65/0x150 + asm_exc_page_fault+0x22/0x30 + +This problem arises from the fact that the repurposed writeback_dirty_page +trace event code was written assuming that every pointer to mapping +(struct address_space) would come from a file-mapped page-cache object, +thus mapping->host would always be populated, and that was a valid case +before commit 19343b5bdd16. The swap-cache address space +(swapper_spaces), however, doesn't populate its ->host (struct inode) +pointer, thus leading to the crashes in the corner-case aforementioned. + +commit 19343b5bdd16 ended up breaking the assignment of __entry->name and +__entry->ino for the wait_on_page_writeback tracepoint -- both dependent +on mapping->host carrying a pointer to a valid inode. The assignment of +__entry->name was fixed by commit 68f23b89067f ("memcg: fix a crash in +wb_workfn when a device disappears"), and this commit fixes the remaining +case, for __entry->ino. 
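+
+Schematically, the swap cache is the one address_space without a backing
+inode, which is why both dereferences need a guard:
+
+	/* file-backed page: mapping->host is the inode */
+	__entry->ino = mapping->host->i_ino;		/* fine */
+
+	/* swap-cache page: mapping->host == NULL */
+	__entry->ino = (mapping && mapping->host) ?
+			mapping->host->i_ino : 0;	/* safe in both cases */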
+ +Link: https://lkml.kernel.org/r/20230606233613.1290819-1-aquini@redhat.com +Fixes: 19343b5bdd16 ("mm/page-writeback: introduce tracepoint for wait_on_page_writeback()") +Signed-off-by: Rafael Aquini +Reviewed-by: Yafang Shao +Cc: Aristeu Rozanski +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Greg Kroah-Hartman +--- + +--- + include/trace/events/writeback.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/include/trace/events/writeback.h ++++ b/include/trace/events/writeback.h +@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(writeback_page_templ + strscpy_pad(__entry->name, + bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : + NULL), 32); +- __entry->ino = mapping ? mapping->host->i_ino : 0; ++ __entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0; + __entry->index = page->index; + ), +