git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.2
author     Sasha Levin <sashal@kernel.org>
           Fri, 12 May 2023 18:01:18 +0000 (14:01 -0400)
committer  Sasha Levin <sashal@kernel.org>
           Fri, 12 May 2023 18:01:18 +0000 (14:01 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
17 files changed:
queue-6.2/cifs-avoid-potential-races-when-handling-multiple-df.patch [new file with mode: 0644]
queue-6.2/cifs-check-only-tcon-status-on-tcon-related-function.patch [new file with mode: 0644]
queue-6.2/dmaengine-at_xdmac-restore-the-content-of-grws-regis.patch [new file with mode: 0644]
queue-6.2/ksmbd-block-asynchronous-requests-when-making-a-dela.patch [new file with mode: 0644]
queue-6.2/ksmbd-destroy-expired-sessions.patch [new file with mode: 0644]
queue-6.2/ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch [new file with mode: 0644]
queue-6.2/ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with.patch [new file with mode: 0644]
queue-6.2/ksmbd-implements-sess-ksmbd_chann_list-as-xarray.patch [new file with mode: 0644]
queue-6.2/kvm-x86-mmu-change-tdp_mmu-to-a-read-only-parameter.patch [new file with mode: 0644]
queue-6.2/kvm-x86-mmu-move-tdp-mmu-vm-init-uninit-behind-tdp_m.patch [new file with mode: 0644]
queue-6.2/kvm-x86-mmu-replace-open-coded-usage-of-tdp_mmu_page.patch [new file with mode: 0644]
queue-6.2/kvm-x86-preserve-tdp-mmu-roots-until-they-are-explic.patch [new file with mode: 0644]
queue-6.2/netfilter-nf_tables-extended-netlink-error-reporting.patch [new file with mode: 0644]
queue-6.2/netfilter-nf_tables-hit-enoent-on-unexisting-chain-f.patch [new file with mode: 0644]
queue-6.2/netfilter-nf_tables-rename-function-to-destroy-hook-.patch [new file with mode: 0644]
queue-6.2/series
queue-6.2/wifi-iwlwifi-mvm-fix-potential-memory-leak.patch [new file with mode: 0644]

diff --git a/queue-6.2/cifs-avoid-potential-races-when-handling-multiple-df.patch b/queue-6.2/cifs-avoid-potential-races-when-handling-multiple-df.patch
new file mode 100644 (file)
index 0000000..ba30228
--- /dev/null
@@ -0,0 +1,392 @@
+From 06b40962ef4b3b27036a69101a893fb148fcd25c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Apr 2023 04:40:08 -0300
+Subject: cifs: avoid potential races when handling multiple dfs tcons
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit 6be2ea33a4093402252724a00c4af8033725184c ]
+
+Now that a DFS tcon manages its own list of DFS referrals and
+sessions, there is no point in having a single worker to refresh
+referrals of all DFS tcons.  Make it faster and less prone to race
+conditions when there are several mounts by queueing a worker per DFS
+tcon that will take care of refreshing only the DFS referrals related
+to it.
+
+Cc: stable@vger.kernel.org # v6.2+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/cifsglob.h  |   2 +-
+ fs/cifs/connect.c   |   7 ++-
+ fs/cifs/dfs.c       |   4 ++
+ fs/cifs/dfs_cache.c | 137 +++++++++++++++++++-------------------------
+ fs/cifs/dfs_cache.h |   9 +++
+ 5 files changed, 80 insertions(+), 79 deletions(-)
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index ea216e9d0f944..e6d12a6563887 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1244,8 +1244,8 @@ struct cifs_tcon {
+       struct cached_fids *cfids;
+       /* BB add field for back pointer to sb struct(s)? */
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+-      struct list_head ulist; /* cache update list */
+       struct list_head dfs_ses_list;
++      struct delayed_work dfs_cache_work;
+ #endif
+       struct delayed_work     query_interfaces; /* query interfaces workqueue job */
+ };
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index af491ae70678a..d71c2fb117c9e 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2386,6 +2386,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+       /* cancel polling of interfaces */
+       cancel_delayed_work_sync(&tcon->query_interfaces);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++      cancel_delayed_work_sync(&tcon->dfs_cache_work);
++#endif
+       if (tcon->use_witness) {
+               int rc;
+@@ -2633,7 +2636,9 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+               queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+                                  (SMB_INTERFACE_POLL_INTERVAL * HZ));
+       }
+-
++#ifdef CONFIG_CIFS_DFS_UPCALL
++      INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
++#endif
+       spin_lock(&cifs_tcp_ses_lock);
+       list_add(&tcon->tcon_list, &ses->tcon_list);
+       spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/fs/cifs/dfs.c b/fs/cifs/dfs.c
+index f02f8d3b92ee8..a93dbca1411b2 100644
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -157,6 +157,8 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
+               rc = cifs_is_path_remote(mnt_ctx);
+       }
++      dfs_cache_noreq_update_tgthint(ref_path + 1, tit);
++
+       if (rc == -EREMOTE && is_refsrv) {
+               rc2 = add_root_smb_session(mnt_ctx);
+               if (rc2)
+@@ -259,6 +261,8 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+               if (list_empty(&tcon->dfs_ses_list)) {
+                       list_replace_init(&mnt_ctx->dfs_ses_list,
+                                         &tcon->dfs_ses_list);
++                      queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
++                                         dfs_cache_get_ttl() * HZ);
+               } else {
+                       dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
+               }
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 6557d7b2798a0..1513b2709889b 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -20,12 +20,14 @@
+ #include "cifs_unicode.h"
+ #include "smb2glob.h"
+ #include "dns_resolve.h"
++#include "dfs.h"
+ #include "dfs_cache.h"
+-#define CACHE_HTABLE_SIZE 32
+-#define CACHE_MAX_ENTRIES 64
+-#define CACHE_MIN_TTL 120 /* 2 minutes */
++#define CACHE_HTABLE_SIZE     32
++#define CACHE_MAX_ENTRIES     64
++#define CACHE_MIN_TTL         120 /* 2 minutes */
++#define CACHE_DEFAULT_TTL     300 /* 5 minutes */
+ #define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
+@@ -50,10 +52,9 @@ struct cache_entry {
+ };
+ static struct kmem_cache *cache_slab __read_mostly;
+-static struct workqueue_struct *dfscache_wq __read_mostly;
++struct workqueue_struct *dfscache_wq;
+-static int cache_ttl;
+-static DEFINE_SPINLOCK(cache_ttl_lock);
++atomic_t dfs_cache_ttl;
+ static struct nls_table *cache_cp;
+@@ -65,10 +66,6 @@ static atomic_t cache_count;
+ static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+ static DECLARE_RWSEM(htable_rw_lock);
+-static void refresh_cache_worker(struct work_struct *work);
+-
+-static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
+-
+ /**
+  * dfs_cache_canonical_path - get a canonical DFS path
+  *
+@@ -290,7 +287,9 @@ int dfs_cache_init(void)
+       int rc;
+       int i;
+-      dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
++      dfscache_wq = alloc_workqueue("cifs-dfscache",
++                                    WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
++                                    0);
+       if (!dfscache_wq)
+               return -ENOMEM;
+@@ -306,6 +305,7 @@ int dfs_cache_init(void)
+               INIT_HLIST_HEAD(&cache_htable[i]);
+       atomic_set(&cache_count, 0);
++      atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
+       cache_cp = load_nls("utf8");
+       if (!cache_cp)
+               cache_cp = load_nls_default();
+@@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+       int rc;
+       struct cache_entry *ce;
+       unsigned int hash;
++      int ttl;
+       WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+@@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+       if (IS_ERR(ce))
+               return ce;
+-      spin_lock(&cache_ttl_lock);
+-      if (!cache_ttl) {
+-              cache_ttl = ce->ttl;
+-              queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-      } else {
+-              cache_ttl = min_t(int, cache_ttl, ce->ttl);
+-              mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-      }
+-      spin_unlock(&cache_ttl_lock);
++      ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
++      atomic_set(&dfs_cache_ttl, ttl);
+       hlist_add_head(&ce->hlist, &cache_htable[hash]);
+       dump_ce(ce);
+@@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
+  */
+ void dfs_cache_destroy(void)
+ {
+-      cancel_delayed_work_sync(&refresh_task);
+       unload_nls(cache_cp);
+       flush_cache_ents();
+       kmem_cache_destroy(cache_slab);
+@@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
+  * target shares in @refs.
+  */
+ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
++                                       const char *path,
+                                        struct dfs_cache_tgt_list *old_tl,
+                                        struct dfs_cache_tgt_list *new_tl)
+ {
+@@ -1153,8 +1147,10 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+                    nit = dfs_cache_get_next_tgt(new_tl, nit)) {
+                       if (target_share_equal(server,
+                                              dfs_cache_get_tgt_name(oit),
+-                                             dfs_cache_get_tgt_name(nit)))
++                                             dfs_cache_get_tgt_name(nit))) {
++                              dfs_cache_noreq_update_tgthint(path, nit);
+                               return;
++                      }
+               }
+       }
+@@ -1162,13 +1158,28 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+       cifs_signal_cifsd_for_reconnect(server, true);
+ }
++static bool is_ses_good(struct cifs_ses *ses)
++{
++      struct TCP_Server_Info *server = ses->server;
++      struct cifs_tcon *tcon = ses->tcon_ipc;
++      bool ret;
++
++      spin_lock(&ses->ses_lock);
++      spin_lock(&ses->chan_lock);
++      ret = !cifs_chan_needs_reconnect(ses, server) &&
++              ses->ses_status == SES_GOOD &&
++              !tcon->need_reconnect;
++      spin_unlock(&ses->chan_lock);
++      spin_unlock(&ses->ses_lock);
++      return ret;
++}
++
+ /* Refresh dfs referral of tcon and mark it for reconnect if needed */
+-static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
++static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+ {
+       struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
+       struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
+-      struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
+-      struct cifs_tcon *ipc = ses->tcon_ipc;
++      struct TCP_Server_Info *server = ses->server;
+       bool needs_refresh = false;
+       struct cache_entry *ce;
+       unsigned int xid;
+@@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
+               goto out;
+       }
+-      spin_lock(&ipc->tc_lock);
+-      if (ipc->status != TID_GOOD) {
+-              spin_unlock(&ipc->tc_lock);
+-              cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
++      ses = CIFS_DFS_ROOT_SES(ses);
++      if (!is_ses_good(ses)) {
++              cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
++                       __func__);
+               goto out;
+       }
+-      spin_unlock(&ipc->tc_lock);
+       ce = cache_refresh_path(xid, ses, path, true);
+       if (!IS_ERR(ce)) {
+               rc = get_targets(ce, &new_tl);
+               up_read(&htable_rw_lock);
+               cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+-              mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
++              mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
+       }
+ out:
+@@ -1216,10 +1226,11 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
+ static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+ {
+       struct TCP_Server_Info *server = tcon->ses->server;
++      struct cifs_ses *ses = tcon->ses;
+       mutex_lock(&server->refpath_lock);
+       if (server->leaf_fullpath)
+-              __refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
++              __refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
+       mutex_unlock(&server->refpath_lock);
+       return 0;
+ }
+@@ -1263,60 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+       return refresh_tcon(tcon, true);
+ }
+-/*
+- * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
+- * from a DFS referral.
+- */
+-static void refresh_cache_worker(struct work_struct *work)
++/* Refresh all DFS referrals related to DFS tcon */
++void dfs_cache_refresh(struct work_struct *work)
+ {
+       struct TCP_Server_Info *server;
+-      struct cifs_tcon *tcon, *ntcon;
+-      struct list_head tcons;
++      struct dfs_root_ses *rses;
++      struct cifs_tcon *tcon;
+       struct cifs_ses *ses;
+-      INIT_LIST_HEAD(&tcons);
++      tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
++      ses = tcon->ses;
++      server = ses->server;
+-      spin_lock(&cifs_tcp_ses_lock);
+-      list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-              spin_lock(&server->srv_lock);
+-              if (!server->leaf_fullpath) {
+-                      spin_unlock(&server->srv_lock);
+-                      continue;
+-              }
+-              spin_unlock(&server->srv_lock);
+-
+-              list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-                      if (ses->tcon_ipc) {
+-                              ses->ses_count++;
+-                              list_add_tail(&ses->tcon_ipc->ulist, &tcons);
+-                      }
+-                      list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-                              if (!tcon->ipc) {
+-                                      tcon->tc_count++;
+-                                      list_add_tail(&tcon->ulist, &tcons);
+-                              }
+-                      }
+-              }
+-      }
+-      spin_unlock(&cifs_tcp_ses_lock);
+-
+-      list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+-              struct TCP_Server_Info *server = tcon->ses->server;
+-
+-              list_del_init(&tcon->ulist);
++      mutex_lock(&server->refpath_lock);
++      if (server->leaf_fullpath)
++              __refresh_tcon(server->leaf_fullpath + 1, ses, false);
++      mutex_unlock(&server->refpath_lock);
++      list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
++              ses = rses->ses;
++              server = ses->server;
+               mutex_lock(&server->refpath_lock);
+               if (server->leaf_fullpath)
+-                      __refresh_tcon(server->leaf_fullpath + 1, tcon, false);
++                      __refresh_tcon(server->leaf_fullpath + 1, ses, false);
+               mutex_unlock(&server->refpath_lock);
+-
+-              if (tcon->ipc)
+-                      cifs_put_smb_ses(tcon->ses);
+-              else
+-                      cifs_put_tcon(tcon);
+       }
+-      spin_lock(&cache_ttl_lock);
+-      queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-      spin_unlock(&cache_ttl_lock);
++      queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
++                         atomic_read(&dfs_cache_ttl) * HZ);
+ }
+diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
+index e0d39393035a9..c6d89cd6d4fd7 100644
+--- a/fs/cifs/dfs_cache.h
++++ b/fs/cifs/dfs_cache.h
+@@ -13,6 +13,9 @@
+ #include <linux/uuid.h>
+ #include "cifsglob.h"
++extern struct workqueue_struct *dfscache_wq;
++extern atomic_t dfs_cache_ttl;
++
+ #define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
+ struct dfs_cache_tgt_list {
+@@ -42,6 +45,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
+                           char **prefix);
+ char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
++void dfs_cache_refresh(struct work_struct *work);
+ static inline struct dfs_cache_tgt_iterator *
+ dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
+@@ -89,4 +93,9 @@ dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
+       return tl ? tl->tl_numtgts : 0;
+ }
++static inline int dfs_cache_get_ttl(void)
++{
++      return atomic_read(&dfs_cache_ttl);
++}
++
+ #endif /* _CIFS_DFS_CACHE_H */
+-- 
+2.39.2
+
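The core of the patch above is the switch from one global delayed worker to a delayed worker embedded in each DFS tcon. The following is a minimal sketch of that per-object pattern only; the names my_wq, my_ttl, my_tcon and my_refresh are illustrative and are not the cifs symbols, which are shown in the hunks themselves.

#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

static struct workqueue_struct *my_wq;
static atomic_t my_ttl = ATOMIC_INIT(300);	/* seconds, like CACHE_DEFAULT_TTL */

struct my_tcon {
	struct delayed_work refresh_work;	/* owned by this object only */
	/* ... per-tcon referral state ... */
};

static void my_refresh(struct work_struct *work)
{
	struct my_tcon *tcon = container_of(work, struct my_tcon,
					    refresh_work.work);

	/* refresh only the referrals that belong to this tcon ... */

	/* ... then re-arm using the lowest TTL seen so far */
	queue_delayed_work(my_wq, &tcon->refresh_work,
			   atomic_read(&my_ttl) * HZ);
}

static int __init my_init(void)
{
	/* mirrors the alloc_workqueue() call in dfs_cache_init() above */
	my_wq = alloc_workqueue("my-refresh",
				WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	return my_wq ? 0 : -ENOMEM;
}

static void my_tcon_init(struct my_tcon *tcon)
{
	INIT_DELAYED_WORK(&tcon->refresh_work, my_refresh);
	queue_delayed_work(my_wq, &tcon->refresh_work,
			   atomic_read(&my_ttl) * HZ);
}

static void my_tcon_put(struct my_tcon *tcon)
{
	/* must run before the object is freed, as in cifs_put_tcon() */
	cancel_delayed_work_sync(&tcon->refresh_work);
}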
diff --git a/queue-6.2/cifs-check-only-tcon-status-on-tcon-related-function.patch b/queue-6.2/cifs-check-only-tcon-status-on-tcon-related-function.patch
new file mode 100644 (file)
index 0000000..d29ce73
--- /dev/null
@@ -0,0 +1,105 @@
+From 54b7205c669606ce34a287e72ad31c7bf33c40bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 10:45:12 +0000
+Subject: cifs: check only tcon status on tcon related functions
+
+From: Shyam Prasad N <sprasad@microsoft.com>
+
+[ Upstream commit 2f0e4f0342201fe2228fcc2301cc2b42ae04b8e3 ]
+
+We had a couple of unnecessary checks of the session status in
+cifs_tree_connect and cifs_mark_open_files_invalid, done under
+ses_lock. Change them to check only the tcon status, under tc_lock.
+
+Signed-off-by: Shyam Prasad N <sprasad@microsoft.com>
+Reviewed-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 6be2ea33a409 ("cifs: avoid potential races when handling multiple dfs tcons")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/connect.c   | 10 +++++++---
+ fs/cifs/dfs.c       | 10 +++++++---
+ fs/cifs/dfs_cache.c |  2 +-
+ fs/cifs/file.c      |  8 ++++----
+ 4 files changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 87527512c2660..af491ae70678a 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4119,9 +4119,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+       /* only send once per connect */
+       spin_lock(&tcon->tc_lock);
+-      if (tcon->ses->ses_status != SES_GOOD ||
+-          (tcon->status != TID_NEW &&
+-          tcon->status != TID_NEED_TCON)) {
++      if (tcon->status != TID_NEW &&
++          tcon->status != TID_NEED_TCON) {
++              spin_unlock(&tcon->tc_lock);
++              return -EHOSTDOWN;
++      }
++
++      if (tcon->status == TID_GOOD) {
+               spin_unlock(&tcon->tc_lock);
+               return 0;
+       }
+diff --git a/fs/cifs/dfs.c b/fs/cifs/dfs.c
+index 4c392bde24066..f02f8d3b92ee8 100644
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -571,9 +571,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+       /* only send once per connect */
+       spin_lock(&tcon->tc_lock);
+-      if (tcon->ses->ses_status != SES_GOOD ||
+-          (tcon->status != TID_NEW &&
+-          tcon->status != TID_NEED_TCON)) {
++      if (tcon->status != TID_NEW &&
++          tcon->status != TID_NEED_TCON) {
++              spin_unlock(&tcon->tc_lock);
++              return -EHOSTDOWN;
++      }
++
++      if (tcon->status == TID_GOOD) {
+               spin_unlock(&tcon->tc_lock);
+               return 0;
+       }
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 9ccaa0c7ac943..6557d7b2798a0 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -1191,7 +1191,7 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
+       }
+       spin_lock(&ipc->tc_lock);
+-      if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
++      if (ipc->status != TID_GOOD) {
+               spin_unlock(&ipc->tc_lock);
+               cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
+               goto out;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index bef7c335ccc6e..d037366fcc5ee 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -48,13 +48,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+       struct list_head *tmp1;
+       /* only send once per connect */
+-      spin_lock(&tcon->ses->ses_lock);
+-      if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
+-              spin_unlock(&tcon->ses->ses_lock);
++      spin_lock(&tcon->tc_lock);
++      if (tcon->status != TID_NEED_RECON) {
++              spin_unlock(&tcon->tc_lock);
+               return;
+       }
+       tcon->status = TID_IN_FILES_INVALIDATE;
+-      spin_unlock(&tcon->ses->ses_lock);
++      spin_unlock(&tcon->tc_lock);
+       /* list all files open on tree connection and mark them invalid */
+       spin_lock(&tcon->open_file_lock);
+-- 
+2.39.2
+
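As a condensed illustration of the narrowed check described above (mirroring the cifs_mark_open_files_invalid() hunk, not the literal function), the guard now consults only the tcon's own status, under the tcon's own lock:

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->status != TID_NEED_RECON) {
		/* another thread already handled (or is handling) this */
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);
	/* ... walk the open files of this tree connection and mark them invalid ... */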
diff --git a/queue-6.2/dmaengine-at_xdmac-restore-the-content-of-grws-regis.patch b/queue-6.2/dmaengine-at_xdmac-restore-the-content-of-grws-regis.patch
new file mode 100644 (file)
index 0000000..35e7332
--- /dev/null
@@ -0,0 +1,47 @@
+From b2286db5601a541156c242bdebe352896f919ea4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Feb 2023 17:18:24 +0200
+Subject: dmaengine: at_xdmac: restore the content of grws register
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit 7c5eb63d16b01c202aaa95f374ae15a807745a73 ]
+
+In case the system suspends to a deep sleep state where power to the
+DMA controller is cut off, we need to restore the content of the GRWS
+register. This is a write-only register, and writing bit X tells the
+controller to suspend read and write requests for channel X. Thus set
+GRWS before restoring the content of the GE (Global Enable) register.
+
+Fixes: e1f7c9eee707 ("dmaengine: at_xdmac: creation of the atmel eXtended DMA Controller driver")
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Link: https://lore.kernel.org/r/20230214151827.1050280-5-claudiu.beznea@microchip.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/at_xdmac.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index bfc8ae2143957..7bcfe3db28a47 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -2025,6 +2025,15 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
+               if (at_xdmac_chan_is_cyclic(atchan)) {
+                       if (at_xdmac_chan_is_paused(atchan))
+                               at_xdmac_device_resume(chan);
++
++                      /*
++                       * We may resume from a deep sleep state where power
++                       * to DMA controller is cut-off. Thus, restore the
++                       * suspend state of channels set though dmaengine API.
++                       */
++                      else if (at_xdmac_chan_is_paused(atchan))
++                              at_xdmac_device_pause_set(atxdmac, atchan);
++
+                       at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
+                       at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+                       at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+-- 
+2.39.2
+
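A hedged sketch of the ordering the description above calls for: because GRWS is write-only, a channel's paused state cannot be read back after a deep-sleep resume and has to be re-written per channel before the Global Enable (GE) register is restored. AT_XDMAC_GRWS, AT_XDMAC_GE, at_xdmac_write(), at_xdmac_chan_is_paused() and atchan->mask follow the driver; the surrounding loop and the saved_global_enable value are illustrative, not the exact resume path.

	/* re-assert the suspend request for every channel the dmaengine API
	 * had paused before the controller lost power ...
	 */
	if (at_xdmac_chan_is_paused(atchan))
		at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);

	/* ... and only then re-enable the controller globally, so the channel
	 * comes back up still paused instead of briefly running.
	 */
	at_xdmac_write(atxdmac, AT_XDMAC_GE, saved_global_enable);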
diff --git a/queue-6.2/ksmbd-block-asynchronous-requests-when-making-a-dela.patch b/queue-6.2/ksmbd-block-asynchronous-requests-when-making-a-dela.patch
new file mode 100644 (file)
index 0000000..1d2f34c
--- /dev/null
@@ -0,0 +1,44 @@
+From 13629d8f99647e6b19a207ac496855723adacabd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 08:43:30 +0900
+Subject: ksmbd: block asynchronous requests when making a delay on session
+ setup
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit b096d97f47326b1e2dbdef1c91fab69ffda54d17 ]
+
+ksmbd delays session setup by 5 seconds to avoid dictionary attacks,
+but the 5-second delay can be bypassed by using asynchronous requests.
+This patch blocks all requests on the current connection while making
+a delay on session setup failure.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20482
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/smb2pdu.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index ae0610c95e33c..51e95ea37195b 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1863,8 +1863,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                               try_delay = true;
+                       sess->state = SMB2_SESSION_EXPIRED;
+-                      if (try_delay)
++                      if (try_delay) {
++                              ksmbd_conn_set_need_reconnect(conn);
+                               ssleep(5);
++                              ksmbd_conn_set_need_negotiate(conn);
++                      }
+               }
+       }
+-- 
+2.39.2
+
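The reason flipping the connection status closes the bypass is that ksmbd gates every incoming request on that status. A hedged condensation of the two halves, combining the delay hunk above with the per-request gate from check_conn_state() in ksmbd/server.c as shown in the session setup/logoff patch later in this series:

	/* smb2_sess_setup(): during the 5s back-off no request, synchronous
	 * or asynchronous, on this connection gets past the gate below */
	if (try_delay) {
		ksmbd_conn_set_need_reconnect(conn);
		ssleep(5);
		ksmbd_conn_set_need_negotiate(conn);
	}

	/* check_conn_state(): requests on a connection that needs reconnect
	 * are answered with an error instead of being processed */
	if (ksmbd_conn_exiting(work->conn) ||
	    ksmbd_conn_need_reconnect(work->conn)) {
		rsp_hdr = work->response_buf;
		rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
		return 1;
	}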
diff --git a/queue-6.2/ksmbd-destroy-expired-sessions.patch b/queue-6.2/ksmbd-destroy-expired-sessions.patch
new file mode 100644 (file)
index 0000000..adf8d53
--- /dev/null
@@ -0,0 +1,192 @@
+From 12322e55bae970d02088aa34b13eb284b530c8bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 08:42:21 +0900
+Subject: ksmbd: destroy expired sessions
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit ea174a91893956450510945a0c5d1a10b5323656 ]
+
+A client can indefinitely send smb2 session setup requests with the
+SessionId set to 0, thus indefinitely spawning new sessions and
+causing unbounded memory usage. This patch limits the number of
+sessions by using an expiry timeout and the session state.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20478
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/mgmt/user_session.c | 68 ++++++++++++++++++++----------------
+ fs/ksmbd/mgmt/user_session.h |  1 +
+ fs/ksmbd/smb2pdu.c           |  1 +
+ fs/ksmbd/smb2pdu.h           |  2 ++
+ 4 files changed, 41 insertions(+), 31 deletions(-)
+
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 69b85a98e2c35..b809f7987b9f4 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -174,70 +174,73 @@ static struct ksmbd_session *__session_lookup(unsigned long long id)
+       struct ksmbd_session *sess;
+       hash_for_each_possible(sessions_table, sess, hlist, id) {
+-              if (id == sess->id)
++              if (id == sess->id) {
++                      sess->last_active = jiffies;
+                       return sess;
++              }
+       }
+       return NULL;
+ }
++static void ksmbd_expire_session(struct ksmbd_conn *conn)
++{
++      unsigned long id;
++      struct ksmbd_session *sess;
++
++      xa_for_each(&conn->sessions, id, sess) {
++              if (sess->state != SMB2_SESSION_VALID ||
++                  time_after(jiffies,
++                             sess->last_active + SMB2_SESSION_TIMEOUT)) {
++                      xa_erase(&conn->sessions, sess->id);
++                      ksmbd_session_destroy(sess);
++                      continue;
++              }
++      }
++}
++
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+                          struct ksmbd_session *sess)
+ {
+       sess->dialect = conn->dialect;
+       memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
++      ksmbd_expire_session(conn);
+       return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+-static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
++static void ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+       struct channel *chann;
+       chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
+       if (!chann)
+-              return -ENOENT;
++              return;
+       kfree(chann);
+-
+-      return 0;
+ }
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ {
+       struct ksmbd_session *sess;
++      unsigned long id;
+-      if (conn->binding) {
+-              int bkt;
+-
+-              down_write(&sessions_table_lock);
+-              hash_for_each(sessions_table, bkt, sess, hlist) {
+-                      if (!ksmbd_chann_del(conn, sess)) {
+-                              up_write(&sessions_table_lock);
+-                              goto sess_destroy;
+-                      }
++      xa_for_each(&conn->sessions, id, sess) {
++              ksmbd_chann_del(conn, sess);
++              if (xa_empty(&sess->ksmbd_chann_list)) {
++                      xa_erase(&conn->sessions, sess->id);
++                      ksmbd_session_destroy(sess);
+               }
+-              up_write(&sessions_table_lock);
+-      } else {
+-              unsigned long id;
+-
+-              xa_for_each(&conn->sessions, id, sess) {
+-                      if (!ksmbd_chann_del(conn, sess))
+-                              goto sess_destroy;
+-              }
+-      }
+-
+-      return;
+-
+-sess_destroy:
+-      if (xa_empty(&sess->ksmbd_chann_list)) {
+-              xa_erase(&conn->sessions, sess->id);
+-              ksmbd_session_destroy(sess);
+       }
+ }
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+                                          unsigned long long id)
+ {
+-      return xa_load(&conn->sessions, id);
++      struct ksmbd_session *sess;
++
++      sess = xa_load(&conn->sessions, id);
++      if (sess)
++              sess->last_active = jiffies;
++      return sess;
+ }
+ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+@@ -246,6 +249,8 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+       down_read(&sessions_table_lock);
+       sess = __session_lookup(id);
++      if (sess)
++              sess->last_active = jiffies;
+       up_read(&sessions_table_lock);
+       return sess;
+@@ -324,6 +329,7 @@ static struct ksmbd_session *__session_create(int protocol)
+       if (ksmbd_init_file_table(&sess->file_table))
+               goto error;
++      sess->last_active = jiffies;
+       sess->state = SMB2_SESSION_IN_PROGRESS;
+       set_session_flag(sess, protocol);
+       xa_init(&sess->tree_conns);
+diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
+index 44a3c67b2bd92..51f38e5b61abb 100644
+--- a/fs/ksmbd/mgmt/user_session.h
++++ b/fs/ksmbd/mgmt/user_session.h
+@@ -59,6 +59,7 @@ struct ksmbd_session {
+       __u8                            smb3signingkey[SMB3_SIGN_KEY_SIZE];
+       struct ksmbd_file_table         file_table;
++      unsigned long                   last_active;
+ };
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 51e95ea37195b..d6423f2fae6d9 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1862,6 +1862,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                       if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+                               try_delay = true;
++                      sess->last_active = jiffies;
+                       sess->state = SMB2_SESSION_EXPIRED;
+                       if (try_delay) {
+                               ksmbd_conn_set_need_reconnect(conn);
+diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
+index 0c8a770fe3189..df05c9b2504d4 100644
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -61,6 +61,8 @@ struct preauth_integrity_info {
+ #define SMB2_SESSION_IN_PROGRESS      BIT(0)
+ #define SMB2_SESSION_VALID            BIT(1)
++#define SMB2_SESSION_TIMEOUT          (10 * HZ)
++
+ struct create_durable_req_v2 {
+       struct create_context ccontext;
+       __u8   Name[8];
+-- 
+2.39.2
+
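A condensed sketch of the expiry sweep the patch introduces, mirroring ksmbd_expire_session() from the hunk above (illustrative, not a drop-in): every session registration first reaps sessions that are no longer valid or have been idle longer than SMB2_SESSION_TIMEOUT (10 * HZ), so a client looping on SessionId == 0 cannot grow the session table without bound.

static void expire_sessions(struct ksmbd_conn *conn)
{
	struct ksmbd_session *sess;
	unsigned long id;

	xa_for_each(&conn->sessions, id, sess) {
		if (sess->state != SMB2_SESSION_VALID ||
		    time_after(jiffies,
			       sess->last_active + SMB2_SESSION_TIMEOUT)) {
			/* stale or expired: drop it before adding a new one */
			xa_erase(&conn->sessions, sess->id);
			ksmbd_session_destroy(sess);
		}
	}
}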
diff --git a/queue-6.2/ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch b/queue-6.2/ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch
new file mode 100644 (file)
index 0000000..d454fcb
--- /dev/null
@@ -0,0 +1,399 @@
+From 975fcfa5f6b4252e00da64ce2c64ecf4a5189ff6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 16:45:00 +0900
+Subject: ksmbd: fix racy issue from session setup and logoff
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit f5c779b7ddbda30866cf2a27c63e34158f858c73 ]
+
+This racy issue is triggered by sending concurrent session setup and
+logoff requests. This patch does not set the connection status to
+KSMBD_SESS_GOOD in session setup if the state is
+KSMBD_SESS_NEED_RECONNECT, and re-looks up the session in logoff to
+check whether it has been deleted.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20481, ZDI-CAN-20590, ZDI-CAN-20596
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/connection.c        | 14 ++++----
+ fs/ksmbd/connection.h        | 39 ++++++++++++---------
+ fs/ksmbd/mgmt/user_session.c |  1 +
+ fs/ksmbd/server.c            |  3 +-
+ fs/ksmbd/smb2pdu.c           | 67 +++++++++++++++++++++++-------------
+ fs/ksmbd/transport_tcp.c     |  2 +-
+ 6 files changed, 77 insertions(+), 49 deletions(-)
+
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index b8f9d627f241d..3cb88853d6932 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -56,7 +56,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+               return NULL;
+       conn->need_neg = true;
+-      conn->status = KSMBD_SESS_NEW;
++      ksmbd_conn_set_new(conn);
+       conn->local_nls = load_nls("utf8");
+       if (!conn->local_nls)
+               conn->local_nls = load_nls_default();
+@@ -149,12 +149,12 @@ int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+       return ret;
+ }
+-static void ksmbd_conn_lock(struct ksmbd_conn *conn)
++void ksmbd_conn_lock(struct ksmbd_conn *conn)
+ {
+       mutex_lock(&conn->srv_mutex);
+ }
+-static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
++void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+ {
+       mutex_unlock(&conn->srv_mutex);
+ }
+@@ -245,7 +245,7 @@ bool ksmbd_conn_alive(struct ksmbd_conn *conn)
+       if (!ksmbd_server_running())
+               return false;
+-      if (conn->status == KSMBD_SESS_EXITING)
++      if (ksmbd_conn_exiting(conn))
+               return false;
+       if (kthread_should_stop())
+@@ -305,7 +305,7 @@ int ksmbd_conn_handler_loop(void *p)
+               pdu_size = get_rfc1002_len(hdr_buf);
+               ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+-              if (conn->status == KSMBD_SESS_GOOD)
++              if (ksmbd_conn_good(conn))
+                       max_allowed_pdu_size =
+                               SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
+               else
+@@ -314,7 +314,7 @@ int ksmbd_conn_handler_loop(void *p)
+               if (pdu_size > max_allowed_pdu_size) {
+                       pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
+                                       pdu_size, max_allowed_pdu_size,
+-                                      conn->status);
++                                      READ_ONCE(conn->status));
+                       break;
+               }
+@@ -418,7 +418,7 @@ static void stop_sessions(void)
+               if (task)
+                       ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+                                   task->comm, task_pid_nr(task));
+-              conn->status = KSMBD_SESS_EXITING;
++              ksmbd_conn_set_exiting(conn);
+               if (t->ops->shutdown) {
+                       read_unlock(&conn_list_lock);
+                       t->ops->shutdown(t);
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 0e3a848defaf3..98bb5f199fa24 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -162,6 +162,8 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
+ int ksmbd_conn_handler_loop(void *p);
+ int ksmbd_conn_transport_init(void);
+ void ksmbd_conn_transport_destroy(void);
++void ksmbd_conn_lock(struct ksmbd_conn *conn);
++void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+ /*
+  * WARNING
+@@ -169,43 +171,48 @@ void ksmbd_conn_transport_destroy(void);
+  * This is a hack. We will move status to a proper place once we land
+  * a multi-sessions support.
+  */
+-static inline bool ksmbd_conn_good(struct ksmbd_work *work)
++static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_GOOD;
++      return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
+ }
+-static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;
++      return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+ }
+-static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_NEED_RECONNECT;
++      return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+ }
+-static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)
++static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+ {
+-      return work->conn->status == KSMBD_SESS_EXITING;
++      return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
+-static inline void ksmbd_conn_set_good(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_GOOD;
++      WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+ }
+-static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_NEED_NEGOTIATE;
++      WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
+ }
+-static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_NEED_RECONNECT;
++      WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+ }
+-static inline void ksmbd_conn_set_exiting(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
+ {
+-      work->conn->status = KSMBD_SESS_EXITING;
++      WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
++}
++
++static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
++{
++      WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
+ }
+ #endif /* __CONNECTION_H__ */
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index a2b128dedcfcf..69b85a98e2c35 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -324,6 +324,7 @@ static struct ksmbd_session *__session_create(int protocol)
+       if (ksmbd_init_file_table(&sess->file_table))
+               goto error;
++      sess->state = SMB2_SESSION_IN_PROGRESS;
+       set_session_flag(sess, protocol);
+       xa_init(&sess->tree_conns);
+       xa_init(&sess->ksmbd_chann_list);
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index cd8a873347a79..dc76d7cf241f0 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -93,7 +93,8 @@ static inline int check_conn_state(struct ksmbd_work *work)
+ {
+       struct smb_hdr *rsp_hdr;
+-      if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) {
++      if (ksmbd_conn_exiting(work->conn) ||
++          ksmbd_conn_need_reconnect(work->conn)) {
+               rsp_hdr = work->response_buf;
+               rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
+               return 1;
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index fe70d36df735b..ae0610c95e33c 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -247,7 +247,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+       rsp = smb2_get_msg(work->response_buf);
+-      WARN_ON(ksmbd_conn_good(work));
++      WARN_ON(ksmbd_conn_good(conn));
+       rsp->StructureSize = cpu_to_le16(65);
+       ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+@@ -277,7 +277,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+               rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+       conn->use_spnego = true;
+-      ksmbd_conn_set_need_negotiate(work);
++      ksmbd_conn_set_need_negotiate(conn);
+       return 0;
+ }
+@@ -567,7 +567,7 @@ int smb2_check_user_session(struct ksmbd_work *work)
+           cmd == SMB2_SESSION_SETUP_HE)
+               return 0;
+-      if (!ksmbd_conn_good(work))
++      if (!ksmbd_conn_good(conn))
+               return -EINVAL;
+       sess_id = le64_to_cpu(req_hdr->SessionId);
+@@ -600,7 +600,7 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+       prev_sess->state = SMB2_SESSION_EXPIRED;
+       xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
+-              chann->conn->status = KSMBD_SESS_EXITING;
++              ksmbd_conn_set_exiting(chann->conn);
+ }
+ /**
+@@ -1067,7 +1067,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+       ksmbd_debug(SMB, "Received negotiate request\n");
+       conn->need_neg = false;
+-      if (ksmbd_conn_good(work)) {
++      if (ksmbd_conn_good(conn)) {
+               pr_err("conn->tcp_status is already in CifsGood State\n");
+               work->send_no_response = 1;
+               return rc;
+@@ -1222,7 +1222,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+       }
+       conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+-      ksmbd_conn_set_need_negotiate(work);
++      ksmbd_conn_set_need_negotiate(conn);
+ err_out:
+       if (rc < 0)
+@@ -1645,6 +1645,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+       rsp->SecurityBufferLength = 0;
+       inc_rfc1001_len(work->response_buf, 9);
++      ksmbd_conn_lock(conn);
+       if (!req->hdr.SessionId) {
+               sess = ksmbd_smb2_session_create();
+               if (!sess) {
+@@ -1692,6 +1693,12 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                       goto out_err;
+               }
++              if (ksmbd_conn_need_reconnect(conn)) {
++                      rc = -EFAULT;
++                      sess = NULL;
++                      goto out_err;
++              }
++
+               if (ksmbd_session_lookup(conn, sess_id)) {
+                       rc = -EACCES;
+                       goto out_err;
+@@ -1716,12 +1723,20 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                       rc = -ENOENT;
+                       goto out_err;
+               }
++
++              if (sess->state == SMB2_SESSION_EXPIRED) {
++                      rc = -EFAULT;
++                      goto out_err;
++              }
++
++              if (ksmbd_conn_need_reconnect(conn)) {
++                      rc = -EFAULT;
++                      sess = NULL;
++                      goto out_err;
++              }
+       }
+       work->sess = sess;
+-      if (sess->state == SMB2_SESSION_EXPIRED)
+-              sess->state = SMB2_SESSION_IN_PROGRESS;
+-
+       negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+       negblob_len = le16_to_cpu(req->SecurityBufferLength);
+       if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+@@ -1751,8 +1766,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                               goto out_err;
+                       }
+-                      ksmbd_conn_set_good(work);
+-                      sess->state = SMB2_SESSION_VALID;
++                      if (!ksmbd_conn_need_reconnect(conn)) {
++                              ksmbd_conn_set_good(conn);
++                              sess->state = SMB2_SESSION_VALID;
++                      }
+                       kfree(sess->Preauth_HashValue);
+                       sess->Preauth_HashValue = NULL;
+               } else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+@@ -1774,8 +1791,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                               if (rc)
+                                       goto out_err;
+-                              ksmbd_conn_set_good(work);
+-                              sess->state = SMB2_SESSION_VALID;
++                              if (!ksmbd_conn_need_reconnect(conn)) {
++                                      ksmbd_conn_set_good(conn);
++                                      sess->state = SMB2_SESSION_VALID;
++                              }
+                               if (conn->binding) {
+                                       struct preauth_session *preauth_sess;
+@@ -1843,14 +1862,13 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                       if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+                               try_delay = true;
+-                      xa_erase(&conn->sessions, sess->id);
+-                      ksmbd_session_destroy(sess);
+-                      work->sess = NULL;
++                      sess->state = SMB2_SESSION_EXPIRED;
+                       if (try_delay)
+                               ssleep(5);
+               }
+       }
++      ksmbd_conn_unlock(conn);
+       return rc;
+ }
+@@ -2075,21 +2093,24 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ {
+       struct ksmbd_conn *conn = work->conn;
+       struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+-      struct ksmbd_session *sess = work->sess;
++      struct ksmbd_session *sess;
++      struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+       rsp->StructureSize = cpu_to_le16(4);
+       inc_rfc1001_len(work->response_buf, 4);
+       ksmbd_debug(SMB, "request\n");
+-      /* setting CifsExiting here may race with start_tcp_sess */
+-      ksmbd_conn_set_need_reconnect(work);
++      ksmbd_conn_set_need_reconnect(conn);
+       ksmbd_close_session_fds(work);
+       ksmbd_conn_wait_idle(conn);
++      /*
++       * Re-lookup session to validate if session is deleted
++       * while waiting request complete
++       */
++      sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
+       if (ksmbd_tree_conn_session_logoff(sess)) {
+-              struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+-
+               ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+               rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+               smb2_set_err_rsp(work);
+@@ -2101,9 +2122,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+       ksmbd_free_user(sess->user);
+       sess->user = NULL;
+-
+-      /* let start_tcp_sess free connection info now */
+-      ksmbd_conn_set_need_negotiate(work);
++      ksmbd_conn_set_need_negotiate(conn);
+       return 0;
+ }
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 20e85e2701f26..eff7a1d793f00 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -333,7 +333,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+               if (length == -EINTR) {
+                       total_read = -ESHUTDOWN;
+                       break;
+-              } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
++              } else if (ksmbd_conn_need_reconnect(conn)) {
+                       total_read = -EAGAIN;
+                       break;
+               } else if (length == -ERESTARTSYS || length == -EAGAIN) {
+-- 
+2.39.2
+
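Two ideas carry the fix above: the connection status is read and written through READ_ONCE()/WRITE_ONCE() on the ksmbd_conn itself rather than through the ksmbd_work, and logoff looks the session up again only after in-flight requests have drained. A hedged condensation of the logoff side, with names taken from the smb2_session_logoff() hunk above:

	ksmbd_conn_set_need_reconnect(conn);	/* stop accepting new requests */
	ksmbd_close_session_fds(work);
	ksmbd_conn_wait_idle(conn);		/* let in-flight requests drain */

	/* re-lookup: the session may have been destroyed while we waited */
	sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
	if (ksmbd_tree_conn_session_logoff(sess)) {
		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
		smb2_set_err_rsp(work);
		return 0;
	}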
diff --git a/queue-6.2/ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with.patch b/queue-6.2/ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with.patch
new file mode 100644 (file)
index 0000000..9378d85
--- /dev/null
@@ -0,0 +1,411 @@
+From ca99a1e8474af5ae45f918aeb6d61577020399a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 May 2023 14:03:40 +0900
+Subject: ksmbd: fix racy issue from smb2 close and logoff with multichannel
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit abcc506a9a71976a8b4c9bf3ee6efd13229c1e19 ]
+
+When an smb client sends concurrent smb2 close and logoff requests
+over a multichannel connection, it can cause a racy issue. The logoff
+request frees the tcon and can cause UAF issues in smb2 close. When
+receiving a logoff request with multichannel, ksmbd should wait until
+all remaining requests complete, as well as the ones in the current
+connection, and then mark the session expired.
+
+Cc: stable@vger.kernel.org
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-20796 ZDI-CAN-20595
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/connection.c        | 54 +++++++++++++++++++++++++++---------
+ fs/ksmbd/connection.h        | 19 +++++++++++--
+ fs/ksmbd/mgmt/tree_connect.c |  3 ++
+ fs/ksmbd/mgmt/user_session.c | 36 ++++++++++++++++++++----
+ fs/ksmbd/smb2pdu.c           | 21 +++++++-------
+ 5 files changed, 101 insertions(+), 32 deletions(-)
+
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index 3cb88853d6932..e3312fbf4c090 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -20,7 +20,7 @@ static DEFINE_MUTEX(init_lock);
+ static struct ksmbd_conn_ops default_conn_ops;
+ LIST_HEAD(conn_list);
+-DEFINE_RWLOCK(conn_list_lock);
++DECLARE_RWSEM(conn_list_lock);
+ /**
+  * ksmbd_conn_free() - free resources of the connection instance
+@@ -32,9 +32,9 @@ DEFINE_RWLOCK(conn_list_lock);
+  */
+ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ {
+-      write_lock(&conn_list_lock);
++      down_write(&conn_list_lock);
+       list_del(&conn->conns_list);
+-      write_unlock(&conn_list_lock);
++      up_write(&conn_list_lock);
+       xa_destroy(&conn->sessions);
+       kvfree(conn->request_buf);
+@@ -84,9 +84,9 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+       spin_lock_init(&conn->llist_lock);
+       INIT_LIST_HEAD(&conn->lock_list);
+-      write_lock(&conn_list_lock);
++      down_write(&conn_list_lock);
+       list_add(&conn->conns_list, &conn_list);
+-      write_unlock(&conn_list_lock);
++      up_write(&conn_list_lock);
+       return conn;
+ }
+@@ -95,7 +95,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+       struct ksmbd_conn *t;
+       bool ret = false;
+-      read_lock(&conn_list_lock);
++      down_read(&conn_list_lock);
+       list_for_each_entry(t, &conn_list, conns_list) {
+               if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+                       continue;
+@@ -103,7 +103,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+               ret = true;
+               break;
+       }
+-      read_unlock(&conn_list_lock);
++      up_read(&conn_list_lock);
+       return ret;
+ }
+@@ -159,9 +159,37 @@ void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+       mutex_unlock(&conn->srv_mutex);
+ }
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ {
++      struct ksmbd_conn *conn;
++
++      down_read(&conn_list_lock);
++      list_for_each_entry(conn, &conn_list, conns_list) {
++              if (conn->binding || xa_load(&conn->sessions, sess_id))
++                      WRITE_ONCE(conn->status, status);
++      }
++      up_read(&conn_list_lock);
++}
++
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++{
++      struct ksmbd_conn *bind_conn;
++
+       wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
++
++      down_read(&conn_list_lock);
++      list_for_each_entry(bind_conn, &conn_list, conns_list) {
++              if (bind_conn == conn)
++                      continue;
++
++              if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
++                  !ksmbd_conn_releasing(bind_conn) &&
++                  atomic_read(&bind_conn->req_running)) {
++                      wait_event(bind_conn->req_running_q,
++                              atomic_read(&bind_conn->req_running) == 0);
++              }
++      }
++      up_read(&conn_list_lock);
+ }
+ int ksmbd_conn_write(struct ksmbd_work *work)
+@@ -362,10 +390,10 @@ int ksmbd_conn_handler_loop(void *p)
+       }
+ out:
++      ksmbd_conn_set_releasing(conn);
+       /* Wait till all reference dropped to the Server object*/
+       wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+-
+       if (IS_ENABLED(CONFIG_UNICODE))
+               utf8_unload(conn->um);
+       unload_nls(conn->local_nls);
+@@ -409,7 +437,7 @@ static void stop_sessions(void)
+       struct ksmbd_transport *t;
+ again:
+-      read_lock(&conn_list_lock);
++      down_read(&conn_list_lock);
+       list_for_each_entry(conn, &conn_list, conns_list) {
+               struct task_struct *task;
+@@ -420,12 +448,12 @@ static void stop_sessions(void)
+                                   task->comm, task_pid_nr(task));
+               ksmbd_conn_set_exiting(conn);
+               if (t->ops->shutdown) {
+-                      read_unlock(&conn_list_lock);
++                      up_read(&conn_list_lock);
+                       t->ops->shutdown(t);
+-                      read_lock(&conn_list_lock);
++                      down_read(&conn_list_lock);
+               }
+       }
+-      read_unlock(&conn_list_lock);
++      up_read(&conn_list_lock);
+       if (!list_empty(&conn_list)) {
+               schedule_timeout_interruptible(HZ / 10); /* 100ms */
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 98bb5f199fa24..ad8dfaa48ffb3 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -26,7 +26,8 @@ enum {
+       KSMBD_SESS_GOOD,
+       KSMBD_SESS_EXITING,
+       KSMBD_SESS_NEED_RECONNECT,
+-      KSMBD_SESS_NEED_NEGOTIATE
++      KSMBD_SESS_NEED_NEGOTIATE,
++      KSMBD_SESS_RELEASING
+ };
+ struct ksmbd_stats {
+@@ -140,10 +141,10 @@ struct ksmbd_transport {
+ #define KSMBD_TCP_PEER_SOCKADDR(c)    ((struct sockaddr *)&((c)->peer_addr))
+ extern struct list_head conn_list;
+-extern rwlock_t conn_list_lock;
++extern struct rw_semaphore conn_list_lock;
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+@@ -191,6 +192,11 @@ static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+       return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
++static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
++{
++      return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
++}
++
+ static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+       WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+@@ -215,4 +221,11 @@ static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
+ {
+       WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
+ }
++
++static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
++{
++      WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
++}
++
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
+ #endif /* __CONNECTION_H__ */
+diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
+index f19de20c2960c..f07a05f376513 100644
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -137,6 +137,9 @@ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+       struct ksmbd_tree_connect *tc;
+       unsigned long id;
++      if (!sess)
++              return -EINVAL;
++
+       xa_for_each(&sess->tree_conns, id, tc)
+               ret |= ksmbd_tree_conn_disconnect(sess, tc);
+       xa_destroy(&sess->tree_conns);
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index b809f7987b9f4..ea4b56d570fbb 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -153,10 +153,6 @@ void ksmbd_session_destroy(struct ksmbd_session *sess)
+       if (!sess)
+               return;
+-      down_write(&sessions_table_lock);
+-      hash_del(&sess->hlist);
+-      up_write(&sessions_table_lock);
+-
+       if (sess->user)
+               ksmbd_free_user(sess->user);
+@@ -187,15 +183,18 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
+       unsigned long id;
+       struct ksmbd_session *sess;
++      down_write(&sessions_table_lock);
+       xa_for_each(&conn->sessions, id, sess) {
+               if (sess->state != SMB2_SESSION_VALID ||
+                   time_after(jiffies,
+                              sess->last_active + SMB2_SESSION_TIMEOUT)) {
+                       xa_erase(&conn->sessions, sess->id);
++                      hash_del(&sess->hlist);
+                       ksmbd_session_destroy(sess);
+                       continue;
+               }
+       }
++      up_write(&sessions_table_lock);
+ }
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+@@ -207,15 +206,16 @@ int ksmbd_session_register(struct ksmbd_conn *conn,
+       return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+-static void ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
++static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+       struct channel *chann;
+       chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
+       if (!chann)
+-              return;
++              return -ENOENT;
+       kfree(chann);
++      return 0;
+ }
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+@@ -223,13 +223,37 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+       struct ksmbd_session *sess;
+       unsigned long id;
++      down_write(&sessions_table_lock);
++      if (conn->binding) {
++              int bkt;
++              struct hlist_node *tmp;
++
++              hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
++                      if (!ksmbd_chann_del(conn, sess) &&
++                          xa_empty(&sess->ksmbd_chann_list)) {
++                              hash_del(&sess->hlist);
++                              ksmbd_session_destroy(sess);
++                      }
++              }
++      }
++
+       xa_for_each(&conn->sessions, id, sess) {
++              unsigned long chann_id;
++              struct channel *chann;
++
++              xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
++                      if (chann->conn != conn)
++                              ksmbd_conn_set_exiting(chann->conn);
++              }
++
+               ksmbd_chann_del(conn, sess);
+               if (xa_empty(&sess->ksmbd_chann_list)) {
+                       xa_erase(&conn->sessions, sess->id);
++                      hash_del(&sess->hlist);
+                       ksmbd_session_destroy(sess);
+               }
+       }
++      up_write(&sessions_table_lock);
+ }
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index d6423f2fae6d9..53badff17efaa 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -2099,21 +2099,22 @@ int smb2_session_logoff(struct ksmbd_work *work)
+       struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+       struct ksmbd_session *sess;
+       struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
++      u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+       rsp->StructureSize = cpu_to_le16(4);
+       inc_rfc1001_len(work->response_buf, 4);
+       ksmbd_debug(SMB, "request\n");
+-      ksmbd_conn_set_need_reconnect(conn);
++      ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+       ksmbd_close_session_fds(work);
+-      ksmbd_conn_wait_idle(conn);
++      ksmbd_conn_wait_idle(conn, sess_id);
+       /*
+        * Re-lookup session to validate if session is deleted
+        * while waiting request complete
+        */
+-      sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
++      sess = ksmbd_session_lookup_all(conn, sess_id);
+       if (ksmbd_tree_conn_session_logoff(sess)) {
+               ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+               rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+@@ -2126,7 +2127,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+       ksmbd_free_user(sess->user);
+       sess->user = NULL;
+-      ksmbd_conn_set_need_negotiate(conn);
++      ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+       return 0;
+ }
+@@ -6958,7 +6959,7 @@ int smb2_lock(struct ksmbd_work *work)
+               nolock = 1;
+               /* check locks in connection list */
+-              read_lock(&conn_list_lock);
++              down_read(&conn_list_lock);
+               list_for_each_entry(conn, &conn_list, conns_list) {
+                       spin_lock(&conn->llist_lock);
+                       list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+@@ -6975,7 +6976,7 @@ int smb2_lock(struct ksmbd_work *work)
+                                               list_del(&cmp_lock->flist);
+                                               list_del(&cmp_lock->clist);
+                                               spin_unlock(&conn->llist_lock);
+-                                              read_unlock(&conn_list_lock);
++                                              up_read(&conn_list_lock);
+                                               locks_free_lock(cmp_lock->fl);
+                                               kfree(cmp_lock);
+@@ -6997,7 +6998,7 @@ int smb2_lock(struct ksmbd_work *work)
+                                   cmp_lock->start > smb_lock->start &&
+                                   cmp_lock->start < smb_lock->end) {
+                                       spin_unlock(&conn->llist_lock);
+-                                      read_unlock(&conn_list_lock);
++                                      up_read(&conn_list_lock);
+                                       pr_err("previous lock conflict with zero byte lock range\n");
+                                       goto out;
+                               }
+@@ -7006,7 +7007,7 @@ int smb2_lock(struct ksmbd_work *work)
+                                   smb_lock->start > cmp_lock->start &&
+                                   smb_lock->start < cmp_lock->end) {
+                                       spin_unlock(&conn->llist_lock);
+-                                      read_unlock(&conn_list_lock);
++                                      up_read(&conn_list_lock);
+                                       pr_err("current lock conflict with zero byte lock range\n");
+                                       goto out;
+                               }
+@@ -7017,14 +7018,14 @@ int smb2_lock(struct ksmbd_work *work)
+                                     cmp_lock->end >= smb_lock->end)) &&
+                                   !cmp_lock->zero_len && !smb_lock->zero_len) {
+                                       spin_unlock(&conn->llist_lock);
+-                                      read_unlock(&conn_list_lock);
++                                      up_read(&conn_list_lock);
+                                       pr_err("Not allow lock operation on exclusive lock range\n");
+                                       goto out;
+                               }
+                       }
+                       spin_unlock(&conn->llist_lock);
+               }
+-              read_unlock(&conn_list_lock);
++              up_read(&conn_list_lock);
+ out_check_cl:
+               if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
+                       pr_err("Try to unlock nolocked range\n");
+-- 
+2.39.2
+
diff --git a/queue-6.2/ksmbd-implements-sess-ksmbd_chann_list-as-xarray.patch b/queue-6.2/ksmbd-implements-sess-ksmbd_chann_list-as-xarray.patch
new file mode 100644 (file)
index 0000000..06fc894
--- /dev/null
@@ -0,0 +1,278 @@
+From d096f26387ca3ce0cee741009957731f56dbcc07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 Jan 2023 18:32:04 +0800
+Subject: ksmbd: Implements sess->ksmbd_chann_list as xarray
+
+From: Dawei Li <set_pte_at@outlook.com>
+
+[ Upstream commit 1d9c4172110e645b383ff13eee759728d74f1a5d ]
+
+For some operations on a channel:
+1. lookup_chann_list(), possibly called at high frequency.
+2. ksmbd_chann_del().
+
+The connection is used as the indexing key to look up a channel; in that
+case, a linear search over a list can hurt performance.
+
+Implements sess->ksmbd_chann_list as xarray.
+
+Signed-off-by: Dawei Li <set_pte_at@outlook.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: f5c779b7ddbd ("ksmbd: fix racy issue from session setup and logoff")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/mgmt/user_session.c | 61 ++++++++++++++----------------------
+ fs/ksmbd/mgmt/user_session.h |  4 +--
+ fs/ksmbd/smb2pdu.c           | 34 +++-----------------
+ 3 files changed, 30 insertions(+), 69 deletions(-)
+
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 92b1603b5abeb..a2b128dedcfcf 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -30,15 +30,15 @@ struct ksmbd_session_rpc {
+ static void free_channel_list(struct ksmbd_session *sess)
+ {
+-      struct channel *chann, *tmp;
++      struct channel *chann;
++      unsigned long index;
+-      write_lock(&sess->chann_lock);
+-      list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+-                               chann_list) {
+-              list_del(&chann->chann_list);
++      xa_for_each(&sess->ksmbd_chann_list, index, chann) {
++              xa_erase(&sess->ksmbd_chann_list, index);
+               kfree(chann);
+       }
+-      write_unlock(&sess->chann_lock);
++
++      xa_destroy(&sess->ksmbd_chann_list);
+ }
+ static void __session_rpc_close(struct ksmbd_session *sess,
+@@ -190,21 +190,15 @@ int ksmbd_session_register(struct ksmbd_conn *conn,
+ static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+-      struct channel *chann, *tmp;
+-
+-      write_lock(&sess->chann_lock);
+-      list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+-                               chann_list) {
+-              if (chann->conn == conn) {
+-                      list_del(&chann->chann_list);
+-                      kfree(chann);
+-                      write_unlock(&sess->chann_lock);
+-                      return 0;
+-              }
+-      }
+-      write_unlock(&sess->chann_lock);
++      struct channel *chann;
++
++      chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
++      if (!chann)
++              return -ENOENT;
+-      return -ENOENT;
++      kfree(chann);
++
++      return 0;
+ }
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+@@ -234,7 +228,7 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+       return;
+ sess_destroy:
+-      if (list_empty(&sess->ksmbd_chann_list)) {
++      if (xa_empty(&sess->ksmbd_chann_list)) {
+               xa_erase(&conn->sessions, sess->id);
+               ksmbd_session_destroy(sess);
+       }
+@@ -320,6 +314,9 @@ static struct ksmbd_session *__session_create(int protocol)
+       struct ksmbd_session *sess;
+       int ret;
++      if (protocol != CIFDS_SESSION_FLAG_SMB2)
++              return NULL;
++
+       sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
+       if (!sess)
+               return NULL;
+@@ -329,30 +326,20 @@ static struct ksmbd_session *__session_create(int protocol)
+       set_session_flag(sess, protocol);
+       xa_init(&sess->tree_conns);
+-      INIT_LIST_HEAD(&sess->ksmbd_chann_list);
++      xa_init(&sess->ksmbd_chann_list);
+       INIT_LIST_HEAD(&sess->rpc_handle_list);
+       sess->sequence_number = 1;
+-      rwlock_init(&sess->chann_lock);
+-
+-      switch (protocol) {
+-      case CIFDS_SESSION_FLAG_SMB2:
+-              ret = __init_smb2_session(sess);
+-              break;
+-      default:
+-              ret = -EINVAL;
+-              break;
+-      }
++      ret = __init_smb2_session(sess);
+       if (ret)
+               goto error;
+       ida_init(&sess->tree_conn_ida);
+-      if (protocol == CIFDS_SESSION_FLAG_SMB2) {
+-              down_write(&sessions_table_lock);
+-              hash_add(sessions_table, &sess->hlist, sess->id);
+-              up_write(&sessions_table_lock);
+-      }
++      down_write(&sessions_table_lock);
++      hash_add(sessions_table, &sess->hlist, sess->id);
++      up_write(&sessions_table_lock);
++
+       return sess;
+ error:
+diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
+index 8934b8ee275ba..44a3c67b2bd92 100644
+--- a/fs/ksmbd/mgmt/user_session.h
++++ b/fs/ksmbd/mgmt/user_session.h
+@@ -21,7 +21,6 @@ struct ksmbd_file_table;
+ struct channel {
+       __u8                    smb3signingkey[SMB3_SIGN_KEY_SIZE];
+       struct ksmbd_conn       *conn;
+-      struct list_head        chann_list;
+ };
+ struct preauth_session {
+@@ -50,8 +49,7 @@ struct ksmbd_session {
+       char                            sess_key[CIFS_KEY_SIZE];
+       struct hlist_node               hlist;
+-      rwlock_t                        chann_lock;
+-      struct list_head                ksmbd_chann_list;
++      struct xarray                   ksmbd_chann_list;
+       struct xarray                   tree_conns;
+       struct ida                      tree_conn_ida;
+       struct list_head                rpc_handle_list;
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index decaef3592f43..fe70d36df735b 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -74,14 +74,7 @@ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
+ {
+-      struct channel *chann;
+-
+-      list_for_each_entry(chann, &sess->ksmbd_chann_list, chann_list) {
+-              if (chann->conn == conn)
+-                      return chann;
+-      }
+-
+-      return NULL;
++      return xa_load(&sess->ksmbd_chann_list, (long)conn);
+ }
+ /**
+@@ -592,6 +585,7 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+       struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
+       struct ksmbd_user *prev_user;
+       struct channel *chann;
++      long index;
+       if (!prev_sess)
+               return;
+@@ -605,10 +599,8 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+               return;
+       prev_sess->state = SMB2_SESSION_EXPIRED;
+-      write_lock(&prev_sess->chann_lock);
+-      list_for_each_entry(chann, &prev_sess->ksmbd_chann_list, chann_list)
++      xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
+               chann->conn->status = KSMBD_SESS_EXITING;
+-      write_unlock(&prev_sess->chann_lock);
+ }
+ /**
+@@ -1521,19 +1513,14 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+ binding_session:
+       if (conn->dialect >= SMB30_PROT_ID) {
+-              read_lock(&sess->chann_lock);
+               chann = lookup_chann_list(sess, conn);
+-              read_unlock(&sess->chann_lock);
+               if (!chann) {
+                       chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+                       if (!chann)
+                               return -ENOMEM;
+                       chann->conn = conn;
+-                      INIT_LIST_HEAD(&chann->chann_list);
+-                      write_lock(&sess->chann_lock);
+-                      list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+-                      write_unlock(&sess->chann_lock);
++                      xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+               }
+       }
+@@ -1608,19 +1595,14 @@ static int krb5_authenticate(struct ksmbd_work *work)
+       }
+       if (conn->dialect >= SMB30_PROT_ID) {
+-              read_lock(&sess->chann_lock);
+               chann = lookup_chann_list(sess, conn);
+-              read_unlock(&sess->chann_lock);
+               if (!chann) {
+                       chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+                       if (!chann)
+                               return -ENOMEM;
+                       chann->conn = conn;
+-                      INIT_LIST_HEAD(&chann->chann_list);
+-                      write_lock(&sess->chann_lock);
+-                      list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+-                      write_unlock(&sess->chann_lock);
++                      xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+               }
+       }
+@@ -8434,14 +8416,11 @@ int smb3_check_sign_req(struct ksmbd_work *work)
+       if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+               signing_key = work->sess->smb3signingkey;
+       } else {
+-              read_lock(&work->sess->chann_lock);
+               chann = lookup_chann_list(work->sess, conn);
+               if (!chann) {
+-                      read_unlock(&work->sess->chann_lock);
+                       return 0;
+               }
+               signing_key = chann->smb3signingkey;
+-              read_unlock(&work->sess->chann_lock);
+       }
+       if (!signing_key) {
+@@ -8501,14 +8480,11 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
+           le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+               signing_key = work->sess->smb3signingkey;
+       } else {
+-              read_lock(&work->sess->chann_lock);
+               chann = lookup_chann_list(work->sess, work->conn);
+               if (!chann) {
+-                      read_unlock(&work->sess->chann_lock);
+                       return;
+               }
+               signing_key = chann->smb3signingkey;
+-              read_unlock(&work->sess->chann_lock);
+       }
+       if (!signing_key)
+-- 
+2.39.2
+
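The patch above replaces a rwlock-protected linked list with an xarray keyed by the connection pointer. A minimal sketch of that pattern, using invented demo_* names rather than the actual ksmbd structures, looks roughly like this:

/*
 * Minimal sketch of the pattern applied above: channel objects indexed
 * in an xarray keyed by the owning connection pointer.  Names are
 * invented for illustration; this is not the ksmbd code.
 */
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_chann {
        void *conn;                     /* owning connection, also the key */
};

static DEFINE_XARRAY(demo_chann_list);

static int demo_chann_add(void *conn)
{
        struct demo_chann *ch = kzalloc(sizeof(*ch), GFP_KERNEL);
        int ret;

        if (!ch)
                return -ENOMEM;
        ch->conn = conn;
        /* Store keyed by the connection pointer, as the patch does. */
        ret = xa_err(xa_store(&demo_chann_list, (unsigned long)conn, ch, GFP_KERNEL));
        if (ret)
                kfree(ch);
        return ret;
}

static struct demo_chann *demo_chann_lookup(void *conn)
{
        /* Indexed lookup replaces the old linear list walk. */
        return xa_load(&demo_chann_list, (unsigned long)conn);
}

static int demo_chann_del(void *conn)
{
        struct demo_chann *ch = xa_erase(&demo_chann_list, (unsigned long)conn);

        if (!ch)
                return -ENOENT;
        kfree(ch);
        return 0;
}

Because xa_store(), xa_load() and xa_erase() are individually safe against each other, the separate per-session rwlock the old list needed can be dropped, which is what the diff hunks above do.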
diff --git a/queue-6.2/kvm-x86-mmu-change-tdp_mmu-to-a-read-only-parameter.patch b/queue-6.2/kvm-x86-mmu-change-tdp_mmu-to-a-read-only-parameter.patch
new file mode 100644 (file)
index 0000000..43e82ca
--- /dev/null
@@ -0,0 +1,324 @@
+From b49978fe85895c1c44051f2894675e2e6864aea5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Sep 2022 10:35:37 -0700
+Subject: KVM: x86/mmu: Change tdp_mmu to a read-only parameter
+
+From: David Matlack <dmatlack@google.com>
+
+[ Upstream commit 3af15ff47c4df3af7b36ea8315f43c6b0af49253 ]
+
+Change tdp_mmu to a read-only parameter and drop the per-vm
+tdp_mmu_enabled. For 32-bit KVM, make tdp_mmu_enabled a macro that is
+always false so that the compiler can continue omitting calls to the TDP
+MMU.
+
+The TDP MMU was introduced in 5.10 and has been enabled by default since
+5.15. At this point there are no known functionality gaps between the
+TDP MMU and the shadow MMU, and the TDP MMU uses less memory and scales
+better with the number of vCPUs. In other words, there is no good reason
+to disable the TDP MMU on a live system.
+
+Purposely do not drop tdp_mmu=N support (i.e. do not force 64-bit KVM to
+always use the TDP MMU) since tdp_mmu=N is still used to get test
+coverage of KVM's shadow MMU TDP support, which is used in 32-bit KVM.
+
+Signed-off-by: David Matlack <dmatlack@google.com>
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <20220921173546.2674386-2-dmatlack@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: edbdb43fc96b ("KVM: x86: Preserve TDP MMU roots until they are explicitly invalidated")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/kvm_host.h | 13 ++-------
+ arch/x86/kvm/mmu.h              |  6 ++--
+ arch/x86/kvm/mmu/mmu.c          | 51 ++++++++++++++++++++++-----------
+ arch/x86/kvm/mmu/tdp_mmu.c      |  9 ++----
+ 4 files changed, 41 insertions(+), 38 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 24480b4f1c575..adc3149c833a9 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1342,21 +1342,12 @@ struct kvm_arch {
+       struct task_struct *nx_huge_page_recovery_thread;
+ #ifdef CONFIG_X86_64
+-      /*
+-       * Whether the TDP MMU is enabled for this VM. This contains a
+-       * snapshot of the TDP MMU module parameter from when the VM was
+-       * created and remains unchanged for the life of the VM. If this is
+-       * true, TDP MMU handler functions will run for various MMU
+-       * operations.
+-       */
+-      bool tdp_mmu_enabled;
+-
+       /* The number of TDP MMU pages across all roots. */
+       atomic64_t tdp_mmu_pages;
+       /*
+-       * List of kvm_mmu_page structs being used as roots.
+-       * All kvm_mmu_page structs in the list should have
++       * List of struct kvm_mmu_pages being used as roots.
++       * All struct kvm_mmu_pages in the list should have
+        * tdp_mmu_page set.
+        *
+        * For reads, this list is protected by:
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index 59804be91b5b0..0f38b78ab04b7 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -254,14 +254,14 @@ static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
+ }
+ #ifdef CONFIG_X86_64
+-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
++extern bool tdp_mmu_enabled;
+ #else
+-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
++#define tdp_mmu_enabled false
+ #endif
+ static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+ {
+-      return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
++      return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
+ }
+ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index ce135539145fd..583979755bd4f 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -99,6 +99,13 @@ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
+  */
+ bool tdp_enabled = false;
++bool __ro_after_init tdp_mmu_allowed;
++
++#ifdef CONFIG_X86_64
++bool __read_mostly tdp_mmu_enabled = true;
++module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
++#endif
++
+ static int max_huge_page_level __read_mostly;
+ static int tdp_root_level __read_mostly;
+ static int max_tdp_level __read_mostly;
+@@ -1293,7 +1300,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ {
+       struct kvm_rmap_head *rmap_head;
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
+                               slot->base_gfn + gfn_offset, mask, true);
+@@ -1326,7 +1333,7 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ {
+       struct kvm_rmap_head *rmap_head;
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
+                               slot->base_gfn + gfn_offset, mask, false);
+@@ -1409,7 +1416,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+               }
+       }
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               write_protected |=
+                       kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
+@@ -1572,7 +1579,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+       if (kvm_memslots_have_rmaps(kvm))
+               flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+       return flush;
+@@ -1585,7 +1592,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+       if (kvm_memslots_have_rmaps(kvm))
+               flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmap);
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
+       return flush;
+@@ -1660,7 +1667,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+       if (kvm_memslots_have_rmaps(kvm))
+               young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
+       return young;
+@@ -1673,7 +1680,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+       if (kvm_memslots_have_rmaps(kvm))
+               young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
+       return young;
+@@ -3610,7 +3617,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
+       if (r < 0)
+               goto out_unlock;
+-      if (is_tdp_mmu_enabled(vcpu->kvm)) {
++      if (tdp_mmu_enabled) {
+               root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
+               mmu->root.hpa = root;
+       } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
+@@ -5743,6 +5750,9 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
+       tdp_root_level = tdp_forced_root_level;
+       max_tdp_level = tdp_max_root_level;
++#ifdef CONFIG_X86_64
++      tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
++#endif
+       /*
+        * max_huge_page_level reflects KVM's MMU capabilities irrespective
+        * of kernel support, e.g. KVM may be capable of using 1GB pages when
+@@ -5990,7 +6000,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+        * write and in the same critical section as making the reload request,
+        * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
+        */
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               kvm_tdp_mmu_invalidate_all_roots(kvm);
+       /*
+@@ -6015,7 +6025,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+        * Deferring the zap until the final reference to the root is put would
+        * lead to use-after-free.
+        */
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ }
+@@ -6127,7 +6137,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+       flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
+-      if (is_tdp_mmu_enabled(kvm)) {
++      if (tdp_mmu_enabled) {
+               for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+                       flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+                                                     gfn_end, true, flush);
+@@ -6160,7 +6170,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+               write_unlock(&kvm->mmu_lock);
+       }
+-      if (is_tdp_mmu_enabled(kvm)) {
++      if (tdp_mmu_enabled) {
+               read_lock(&kvm->mmu_lock);
+               kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
+               read_unlock(&kvm->mmu_lock);
+@@ -6403,7 +6413,7 @@ void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
+                                  u64 start, u64 end,
+                                  int target_level)
+ {
+-      if (!is_tdp_mmu_enabled(kvm))
++      if (!tdp_mmu_enabled)
+               return;
+       if (kvm_memslots_have_rmaps(kvm))
+@@ -6424,7 +6434,7 @@ void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
+       u64 start = memslot->base_gfn;
+       u64 end = start + memslot->npages;
+-      if (!is_tdp_mmu_enabled(kvm))
++      if (!tdp_mmu_enabled)
+               return;
+       if (kvm_memslots_have_rmaps(kvm)) {
+@@ -6507,7 +6517,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+               write_unlock(&kvm->mmu_lock);
+       }
+-      if (is_tdp_mmu_enabled(kvm)) {
++      if (tdp_mmu_enabled) {
+               read_lock(&kvm->mmu_lock);
+               kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
+               read_unlock(&kvm->mmu_lock);
+@@ -6542,7 +6552,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+               write_unlock(&kvm->mmu_lock);
+       }
+-      if (is_tdp_mmu_enabled(kvm)) {
++      if (tdp_mmu_enabled) {
+               read_lock(&kvm->mmu_lock);
+               kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
+               read_unlock(&kvm->mmu_lock);
+@@ -6577,7 +6587,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
+       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+-      if (is_tdp_mmu_enabled(kvm))
++      if (tdp_mmu_enabled)
+               kvm_tdp_mmu_zap_all(kvm);
+       write_unlock(&kvm->mmu_lock);
+@@ -6742,6 +6752,13 @@ void __init kvm_mmu_x86_module_init(void)
+       if (nx_huge_pages == -1)
+               __set_nx_huge_pages(get_nx_auto_mode());
++      /*
++       * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
++       * TDP MMU is actually enabled is determined in kvm_configure_mmu()
++       * when the vendor module is loaded.
++       */
++      tdp_mmu_allowed = tdp_mmu_enabled;
++
+       kvm_mmu_spte_module_init();
+ }
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index d6df38d371a00..03511e83050fa 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -10,23 +10,18 @@
+ #include <asm/cmpxchg.h>
+ #include <trace/events/kvm.h>
+-static bool __read_mostly tdp_mmu_enabled = true;
+-module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
+-
+ /* Initializes the TDP MMU for the VM, if enabled. */
+ int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+ {
+       struct workqueue_struct *wq;
+-      if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
++      if (!tdp_mmu_enabled)
+               return 0;
+       wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+       if (!wq)
+               return -ENOMEM;
+-      /* This should not be changed for the lifetime of the VM. */
+-      kvm->arch.tdp_mmu_enabled = true;
+       INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
+       spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
+       kvm->arch.tdp_mmu_zap_wq = wq;
+@@ -47,7 +42,7 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
+ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ {
+-      if (!kvm->arch.tdp_mmu_enabled)
++      if (!tdp_mmu_enabled)
+               return;
+       /* Also waits for any queued work items.  */
+-- 
+2.39.2
+
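The patch above makes the tdp_mmu module parameter read-only (0444) and snapshots the user's request before the final enable decision is made in kvm_configure_mmu(). A minimal, self-contained sketch of that module-parameter pattern, with invented demo names rather than the KVM symbols, might look like:

/*
 * Sketch of the change described above: expose the knob read-only (0444)
 * so it cannot be flipped at runtime, and snapshot the user's request at
 * module-init time before later setup computes the effective value.
 * Names are invented; this is not the KVM code.
 */
#include <linux/module.h>
#include <linux/init.h>

static bool demo_feature_enabled = true;
module_param_named(demo_feature, demo_feature_enabled, bool, 0444); /* 0644 would allow runtime writes */

static bool __ro_after_init demo_feature_allowed;

static int __init demo_init(void)
{
        /* Snapshot what the user asked for; later setup may still veto it. */
        demo_feature_allowed = demo_feature_enabled;
        pr_info("demo feature %s\n", demo_feature_allowed ? "requested" : "disabled");
        return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");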
diff --git a/queue-6.2/kvm-x86-mmu-move-tdp-mmu-vm-init-uninit-behind-tdp_m.patch b/queue-6.2/kvm-x86-mmu-move-tdp-mmu-vm-init-uninit-behind-tdp_m.patch
new file mode 100644 (file)
index 0000000..1eec858
--- /dev/null
@@ -0,0 +1,122 @@
+From 6c0200d18fba9cf801bc9b3a8b34b940ad3f347f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Sep 2022 10:35:38 -0700
+Subject: KVM: x86/mmu: Move TDP MMU VM init/uninit behind tdp_mmu_enabled
+
+From: David Matlack <dmatlack@google.com>
+
+[ Upstream commit 991c8047b740f192a057d5f22df2f91f087cdb72 ]
+
+Move kvm_mmu_{init,uninit}_tdp_mmu() behind tdp_mmu_enabled. This makes
+these functions consistent with the rest of the calls into the TDP MMU
+from mmu.c, and which is now possible since tdp_mmu_enabled is only
+modified when the x86 vendor module is loaded. i.e. It will never change
+during the lifetime of a VM.
+
+This change also enabled removing the stub definitions for 32-bit KVM,
+as the compiler will just optimize the calls out like it does for all
+the other TDP MMU functions.
+
+No functional change intended.
+
+Signed-off-by: David Matlack <dmatlack@google.com>
+Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <20220921173546.2674386-3-dmatlack@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: edbdb43fc96b ("KVM: x86: Preserve TDP MMU roots until they are explicitly invalidated")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu/mmu.c     | 11 +++++++----
+ arch/x86/kvm/mmu/tdp_mmu.c |  6 ------
+ arch/x86/kvm/mmu/tdp_mmu.h |  7 +++----
+ 3 files changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 583979755bd4f..8666e8ff48a6e 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6051,9 +6051,11 @@ int kvm_mmu_init_vm(struct kvm *kvm)
+       INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
+       spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+-      r = kvm_mmu_init_tdp_mmu(kvm);
+-      if (r < 0)
+-              return r;
++      if (tdp_mmu_enabled) {
++              r = kvm_mmu_init_tdp_mmu(kvm);
++              if (r < 0)
++                      return r;
++      }
+       node->track_write = kvm_mmu_pte_write;
+       node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
+@@ -6083,7 +6085,8 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
+       kvm_page_track_unregister_notifier(kvm, node);
+-      kvm_mmu_uninit_tdp_mmu(kvm);
++      if (tdp_mmu_enabled)
++              kvm_mmu_uninit_tdp_mmu(kvm);
+       mmu_free_vm_memory_caches(kvm);
+ }
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 03511e83050fa..7e5952e95d3bf 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -15,9 +15,6 @@ int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+ {
+       struct workqueue_struct *wq;
+-      if (!tdp_mmu_enabled)
+-              return 0;
+-
+       wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+       if (!wq)
+               return -ENOMEM;
+@@ -42,9 +39,6 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
+ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ {
+-      if (!tdp_mmu_enabled)
+-              return;
+-
+       /* Also waits for any queued work items.  */
+       destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
+index d3714200b932a..e4ab2dac269d6 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.h
++++ b/arch/x86/kvm/mmu/tdp_mmu.h
+@@ -7,6 +7,9 @@
+ #include "spte.h"
++int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
++void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
++
+ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
+ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
+@@ -68,8 +71,6 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+                                       u64 *spte);
+ #ifdef CONFIG_X86_64
+-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+ static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
+ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
+@@ -89,8 +90,6 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
+       return sp && is_tdp_mmu_page(sp) && sp->root_count;
+ }
+ #else
+-static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
+-static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+ static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+ static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
+ #endif
+-- 
+2.39.2
+
diff --git a/queue-6.2/kvm-x86-mmu-replace-open-coded-usage-of-tdp_mmu_page.patch b/queue-6.2/kvm-x86-mmu-replace-open-coded-usage-of-tdp_mmu_page.patch
new file mode 100644 (file)
index 0000000..17ce1e5
--- /dev/null
@@ -0,0 +1,54 @@
+From d8bfb1756ee6fbe2f6b4e7ebae8774321a981a86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Oct 2022 18:17:00 +0000
+Subject: KVM: x86/mmu: Replace open coded usage of tdp_mmu_page with
+ is_tdp_mmu_page()
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit aeb568a1a6041e3d69def54046747bbd989bc4ed ]
+
+Use is_tdp_mmu_page() instead of querying sp->tdp_mmu_page directly so
+that all users benefit if KVM ever finds a way to optimize the logic.
+
+No functional change intended.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20221012181702.3663607-10-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: edbdb43fc96b ("KVM: x86: Preserve TDP MMU roots until they are explicitly invalidated")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu/mmu.c     | 2 +-
+ arch/x86/kvm/mmu/tdp_mmu.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 8666e8ff48a6e..dcca08a08bd0c 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -1942,7 +1942,7 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+               return true;
+       /* TDP MMU pages do not use the MMU generation. */
+-      return !sp->tdp_mmu_page &&
++      return !is_tdp_mmu_page(sp) &&
+              unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+ }
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 7e5952e95d3bf..cc1fb9a656201 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -133,7 +133,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+       if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
+               return;
+-      WARN_ON(!root->tdp_mmu_page);
++      WARN_ON(!is_tdp_mmu_page(root));
+       /*
+        * The root now has refcount=0.  It is valid, but readers already
+-- 
+2.39.2
+
diff --git a/queue-6.2/kvm-x86-preserve-tdp-mmu-roots-until-they-are-explic.patch b/queue-6.2/kvm-x86-preserve-tdp-mmu-roots-until-they-are-explic.patch
new file mode 100644 (file)
index 0000000..802f00b
--- /dev/null
@@ -0,0 +1,263 @@
+From fdef60b79da20d55e894f0d69c7e0888d4828f44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Apr 2023 15:03:23 -0700
+Subject: KVM: x86: Preserve TDP MMU roots until they are explicitly
+ invalidated
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit edbdb43fc96b11b3bfa531be306a1993d9fe89ec ]
+
+Preserve TDP MMU roots until they are explicitly invalidated by gifting
+the TDP MMU itself a reference to a root when it is allocated.  Keeping a
+reference in the TDP MMU fixes a flaw where the TDP MMU exhibits terrible
+performance, and can potentially even soft-hang a vCPU, if a vCPU
+frequently unloads its roots, e.g. when KVM is emulating SMI+RSM.
+
+When KVM emulates something that invalidates _all_ TLB entries, e.g. SMI
+and RSM, KVM unloads all of the vCPUs roots (KVM keeps a small per-vCPU
+cache of previous roots).  Unloading roots is a simple way to ensure KVM
+flushes and synchronizes all roots for the vCPU, as KVM flushes and syncs
+when allocating a "new" root (from the vCPU's perspective).
+
+In the shadow MMU, KVM keeps track of all shadow pages, roots included, in
+a per-VM hash table.  Unloading a shadow MMU root just wipes it from the
+per-vCPU cache; the root is still tracked in the per-VM hash table.  When
+KVM loads a "new" root for the vCPU, KVM will find the old, unloaded root
+in the per-VM hash table.
+
+Unlike the shadow MMU, the TDP MMU doesn't track "inactive" roots in a
+per-VM structure, where "active" in this case means a root is either
+in-use or cached as a previous root by at least one vCPU.  When a TDP MMU
+root becomes inactive, i.e. the last vCPU reference to the root is put,
+KVM immediately frees the root (asterisk on "immediately" as the actual
+freeing may be done by a worker, but for all intents and purposes the root
+is gone).
+
+The TDP MMU behavior is especially problematic for 1-vCPU setups, as
+unloading all roots effectively frees all roots.  The issue is mitigated
+to some degree in multi-vCPU setups as a different vCPU usually holds a
+reference to an unloaded root and thus keeps the root alive, allowing the
+vCPU to reuse its old root after unloading (with a flush+sync).
+
+The TDP MMU flaw has been known for some time, as until very recently,
+KVM's handling of CR0.WP also triggered unloading of all roots.  The
+CR0.WP toggling scenario was eventually addressed by not unloading roots
+when _only_ CR0.WP is toggled, but such an approach doesn't Just Work
+for emulating SMM as KVM must emulate a full TLB flush on entry and exit
+to/from SMM.  Given that the shadow MMU plays nice with unloading roots
+at will, teaching the TDP MMU to do the same is far less complex than
+modifying KVM to track which roots need to be flushed before reuse.
+
+Note, preserving all possible TDP MMU roots is not a concern with respect
+to memory consumption.  Now that the role for direct MMUs doesn't include
+information about the guest, e.g. CR0.PG, CR0.WP, CR4.SMEP, etc., there
+are _at most_ six possible roots (where "guest_mode" here means L2):
+
+  1. 4-level !SMM !guest_mode
+  2. 4-level  SMM !guest_mode
+  3. 5-level !SMM !guest_mode
+  4. 5-level  SMM !guest_mode
+  5. 4-level !SMM guest_mode
+  6. 5-level !SMM guest_mode
+
+And because each vCPU can track 4 valid roots, a VM can already have all
+6 root combinations live at any given time.  Not to mention that, in
+practice, no sane VMM will advertise different guest.MAXPHYADDR values
+across vCPUs, i.e. KVM won't ever use both 4-level and 5-level roots for
+a single VM.  Furthermore, the vast majority of modern hypervisors will
+utilize EPT/NPT when available, thus the guest_mode=%true cases are also
+unlikely to be utilized.
+
+Reported-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
+Link: https://lore.kernel.org/all/959c5bce-beb5-b463-7158-33fc4a4f910c@linux.microsoft.com
+Link: https://lkml.kernel.org/r/20220209170020.1775368-1-pbonzini%40redhat.com
+Link: https://lore.kernel.org/all/20230322013731.102955-1-minipli@grsecurity.net
+Link: https://lore.kernel.org/all/000000000000a0bc2b05f9dd7fab@google.com
+Link: https://lore.kernel.org/all/000000000000eca0b905fa0f7756@google.com
+Cc: Ben Gardon <bgardon@google.com>
+Cc: David Matlack <dmatlack@google.com>
+Cc: stable@vger.kernel.org
+Tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
+Link: https://lore.kernel.org/r/20230426220323.3079789-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu/tdp_mmu.c | 121 +++++++++++++++++--------------------
+ 1 file changed, 56 insertions(+), 65 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index cc1fb9a656201..c649a333792b8 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -39,7 +39,17 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
+ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ {
+-      /* Also waits for any queued work items.  */
++      /*
++       * Invalidate all roots, which besides the obvious, schedules all roots
++       * for zapping and thus puts the TDP MMU's reference to each root, i.e.
++       * ultimately frees all roots.
++       */
++      kvm_tdp_mmu_invalidate_all_roots(kvm);
++
++      /*
++       * Destroying a workqueue also first flushes the workqueue, i.e. no
++       * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
++       */
+       destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+       WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+@@ -115,16 +125,6 @@ static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root
+       queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+ }
+-static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
+-{
+-      union kvm_mmu_page_role role = page->role;
+-      role.invalid = true;
+-
+-      /* No need to use cmpxchg, only the invalid bit can change.  */
+-      role.word = xchg(&page->role.word, role.word);
+-      return role.invalid;
+-}
+-
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+                         bool shared)
+ {
+@@ -133,45 +133,12 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+       if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
+               return;
+-      WARN_ON(!is_tdp_mmu_page(root));
+-
+       /*
+-       * The root now has refcount=0.  It is valid, but readers already
+-       * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
+-       * rejects it.  This remains true for the rest of the execution
+-       * of this function, because readers visit valid roots only
+-       * (except for tdp_mmu_zap_root_work(), which however
+-       * does not acquire any reference itself).
+-       *
+-       * Even though there are flows that need to visit all roots for
+-       * correctness, they all take mmu_lock for write, so they cannot yet
+-       * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
+-       * since the root still has refcount=0.
+-       *
+-       * However, tdp_mmu_zap_root can yield, and writers do not expect to
+-       * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
+-       * So the root temporarily gets an extra reference, going to refcount=1
+-       * while staying invalid.  Readers still cannot acquire any reference;
+-       * but writers are now allowed to run if tdp_mmu_zap_root yields and
+-       * they might take an extra reference if they themselves yield.
+-       * Therefore, when the reference is given back by the worker,
+-       * there is no guarantee that the refcount is still 1.  If not, whoever
+-       * puts the last reference will free the page, but they will not have to
+-       * zap the root because a root cannot go from invalid to valid.
++       * The TDP MMU itself holds a reference to each root until the root is
++       * explicitly invalidated, i.e. the final reference should be never be
++       * put for a valid root.
+        */
+-      if (!kvm_tdp_root_mark_invalid(root)) {
+-              refcount_set(&root->tdp_mmu_root_count, 1);
+-
+-              /*
+-               * Zapping the root in a worker is not just "nice to have";
+-               * it is required because kvm_tdp_mmu_invalidate_all_roots()
+-               * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
+-               * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
+-               * might return with some roots not zapped yet.
+-               */
+-              tdp_mmu_schedule_zap_root(kvm, root);
+-              return;
+-      }
++      KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
+       spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+       list_del_rcu(&root->link);
+@@ -319,7 +286,14 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+       root = tdp_mmu_alloc_sp(vcpu);
+       tdp_mmu_init_sp(root, NULL, 0, role);
+-      refcount_set(&root->tdp_mmu_root_count, 1);
++      /*
++       * TDP MMU roots are kept until they are explicitly invalidated, either
++       * by a memslot update or by the destruction of the VM.  Initialize the
++       * refcount to two; one reference for the vCPU, and one reference for
++       * the TDP MMU itself, which is held until the root is invalidated and
++       * is ultimately put by tdp_mmu_zap_root_work().
++       */
++      refcount_set(&root->tdp_mmu_root_count, 2);
+       spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+       list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
+@@ -1022,32 +996,49 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+ /*
+  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
+  * is about to be zapped, e.g. in response to a memslots update.  The actual
+- * zapping is performed asynchronously, so a reference is taken on all roots.
+- * Using a separate workqueue makes it easy to ensure that the destruction is
+- * performed before the "fast zap" completes, without keeping a separate list
+- * of invalidated roots; the list is effectively the list of work items in
+- * the workqueue.
+- *
+- * Get a reference even if the root is already invalid, the asynchronous worker
+- * assumes it was gifted a reference to the root it processes.  Because mmu_lock
+- * is held for write, it should be impossible to observe a root with zero refcount,
+- * i.e. the list of roots cannot be stale.
++ * zapping is performed asynchronously.  Using a separate workqueue makes it
++ * easy to ensure that the destruction is performed before the "fast zap"
++ * completes, without keeping a separate list of invalidated roots; the list is
++ * effectively the list of work items in the workqueue.
+  *
+- * This has essentially the same effect for the TDP MMU
+- * as updating mmu_valid_gen does for the shadow MMU.
++ * Note, the asynchronous worker is gifted the TDP MMU's reference.
++ * See kvm_tdp_mmu_get_vcpu_root_hpa().
+  */
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+ {
+       struct kvm_mmu_page *root;
+-      lockdep_assert_held_write(&kvm->mmu_lock);
+-      list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+-              if (!root->role.invalid &&
+-                  !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
++      /*
++       * mmu_lock must be held for write to ensure that a root doesn't become
++       * invalid while there are active readers (invalidating a root while
++       * there are active readers may or may not be problematic in practice,
++       * but it's uncharted territory and not supported).
++       *
++       * Waive the assertion if there are no users of @kvm, i.e. the VM is
++       * being destroyed after all references have been put, or if no vCPUs
++       * have been created (which means there are no roots), i.e. the VM is
++       * being destroyed in an error path of KVM_CREATE_VM.
++       */
++      if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
++          refcount_read(&kvm->users_count) && kvm->created_vcpus)
++              lockdep_assert_held_write(&kvm->mmu_lock);
++
++      /*
++       * As above, mmu_lock isn't held when destroying the VM!  There can't
++       * be other references to @kvm, i.e. nothing else can invalidate roots
++       * or be consuming roots, but walking the list of roots does need to be
++       * guarded against roots being deleted by the asynchronous zap worker.
++       */
++      rcu_read_lock();
++
++      list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
++              if (!root->role.invalid) {
+                       root->role.invalid = true;
+                       tdp_mmu_schedule_zap_root(kvm, root);
+               }
+       }
++
++      rcu_read_unlock();
+ }
+ /*
+-- 
+2.39.2
+
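The commit message above describes gifting the TDP MMU its own reference to each root, so dropping the last vCPU reference no longer frees the root. A simplified, hypothetical sketch of that "owner holds a reference until explicit invalidation" refcounting scheme (demo_* names are invented, and the real patch defers the final free to a zap worker) could look like:

/*
 * Sketch only, not the KVM code: the allocator hands one reference to the
 * caller and keeps one for the owning subsystem, so the object survives
 * until it is explicitly invalidated.
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_root {
        refcount_t refcount;
        bool invalid;
};

static struct demo_root *demo_root_alloc(void)
{
        struct demo_root *root = kzalloc(sizeof(*root), GFP_KERNEL);

        if (!root)
                return NULL;
        /* One reference for the caller, one gifted to the subsystem. */
        refcount_set(&root->refcount, 2);
        return root;
}

static void demo_root_put(struct demo_root *root)
{
        /* Frees only once both the user and the subsystem have let go. */
        if (refcount_dec_and_test(&root->refcount))
                kfree(root);
}

static void demo_root_invalidate(struct demo_root *root)
{
        /* Drop the subsystem's reference only on the valid -> invalid edge. */
        if (!root->invalid) {
                root->invalid = true;
                demo_root_put(root);
        }
}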
diff --git a/queue-6.2/netfilter-nf_tables-extended-netlink-error-reporting.patch b/queue-6.2/netfilter-nf_tables-extended-netlink-error-reporting.patch
new file mode 100644 (file)
index 0000000..666c780
--- /dev/null
@@ -0,0 +1,171 @@
+From bb1cb6e55faec46490e52759fca480cec7b7ccd2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Apr 2023 00:34:28 +0200
+Subject: netfilter: nf_tables: extended netlink error reporting for netdevice
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit c3c060adc0249355411a93e61888051e6902b8a1 ]
+
+Flowtable and netdev chains are bound to one or several netdevices,
+extend netlink error reporting to specify the netdevice that
+triggers the error.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 8509f62b0b07 ("netfilter: nf_tables: hit ENOENT on unexisting chain/flowtable update with missing attributes")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 38 ++++++++++++++++++++++-------------
+ 1 file changed, 24 insertions(+), 14 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 96bc4b8ded423..4b0a84a39b19e 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1951,7 +1951,8 @@ static struct nft_hook *nft_hook_list_find(struct list_head *hook_list,
+ static int nf_tables_parse_netdev_hooks(struct net *net,
+                                       const struct nlattr *attr,
+-                                      struct list_head *hook_list)
++                                      struct list_head *hook_list,
++                                      struct netlink_ext_ack *extack)
+ {
+       struct nft_hook *hook, *next;
+       const struct nlattr *tmp;
+@@ -1965,10 +1966,12 @@ static int nf_tables_parse_netdev_hooks(struct net *net,
+               hook = nft_netdev_hook_alloc(net, tmp);
+               if (IS_ERR(hook)) {
++                      NL_SET_BAD_ATTR(extack, tmp);
+                       err = PTR_ERR(hook);
+                       goto err_hook;
+               }
+               if (nft_hook_list_find(hook_list, hook)) {
++                      NL_SET_BAD_ATTR(extack, tmp);
+                       kfree(hook);
+                       err = -EEXIST;
+                       goto err_hook;
+@@ -2001,20 +2004,23 @@ struct nft_chain_hook {
+ static int nft_chain_parse_netdev(struct net *net,
+                                 struct nlattr *tb[],
+-                                struct list_head *hook_list)
++                                struct list_head *hook_list,
++                                struct netlink_ext_ack *extack)
+ {
+       struct nft_hook *hook;
+       int err;
+       if (tb[NFTA_HOOK_DEV]) {
+               hook = nft_netdev_hook_alloc(net, tb[NFTA_HOOK_DEV]);
+-              if (IS_ERR(hook))
++              if (IS_ERR(hook)) {
++                      NL_SET_BAD_ATTR(extack, tb[NFTA_HOOK_DEV]);
+                       return PTR_ERR(hook);
++              }
+               list_add_tail(&hook->list, hook_list);
+       } else if (tb[NFTA_HOOK_DEVS]) {
+               err = nf_tables_parse_netdev_hooks(net, tb[NFTA_HOOK_DEVS],
+-                                                 hook_list);
++                                                 hook_list, extack);
+               if (err < 0)
+                       return err;
+@@ -2082,7 +2088,7 @@ static int nft_chain_parse_hook(struct net *net,
+       INIT_LIST_HEAD(&hook->list);
+       if (nft_base_chain_netdev(family, hook->num)) {
+-              err = nft_chain_parse_netdev(net, ha, &hook->list);
++              err = nft_chain_parse_netdev(net, ha, &hook->list, extack);
+               if (err < 0) {
+                       module_put(type->owner);
+                       return err;
+@@ -7552,7 +7558,8 @@ static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX
+ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+                                   const struct nlattr *attr,
+                                   struct nft_flowtable_hook *flowtable_hook,
+-                                  struct nft_flowtable *flowtable, bool add)
++                                  struct nft_flowtable *flowtable,
++                                  struct netlink_ext_ack *extack, bool add)
+ {
+       struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
+       struct nft_hook *hook;
+@@ -7599,7 +7606,8 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+       if (tb[NFTA_FLOWTABLE_HOOK_DEVS]) {
+               err = nf_tables_parse_netdev_hooks(ctx->net,
+                                                  tb[NFTA_FLOWTABLE_HOOK_DEVS],
+-                                                 &flowtable_hook->list);
++                                                 &flowtable_hook->list,
++                                                 extack);
+               if (err < 0)
+                       return err;
+       }
+@@ -7742,7 +7750,8 @@ static void nft_flowtable_hooks_destroy(struct list_head *hook_list)
+ }
+ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+-                              struct nft_flowtable *flowtable)
++                              struct nft_flowtable *flowtable,
++                              struct netlink_ext_ack *extack)
+ {
+       const struct nlattr * const *nla = ctx->nla;
+       struct nft_flowtable_hook flowtable_hook;
+@@ -7753,7 +7762,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+       int err;
+       err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
+-                                     &flowtable_hook, flowtable, false);
++                                     &flowtable_hook, flowtable, extack, false);
+       if (err < 0)
+               return err;
+@@ -7858,7 +7867,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+               nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+-              return nft_flowtable_update(&ctx, info->nlh, flowtable);
++              return nft_flowtable_update(&ctx, info->nlh, flowtable, extack);
+       }
+       nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+@@ -7899,7 +7908,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+               goto err3;
+       err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+-                                     &flowtable_hook, flowtable, true);
++                                     &flowtable_hook, flowtable, extack, true);
+       if (err < 0)
+               goto err4;
+@@ -7951,7 +7960,8 @@ static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook
+ }
+ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+-                               struct nft_flowtable *flowtable)
++                               struct nft_flowtable *flowtable,
++                               struct netlink_ext_ack *extack)
+ {
+       const struct nlattr * const *nla = ctx->nla;
+       struct nft_flowtable_hook flowtable_hook;
+@@ -7961,7 +7971,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+       int err;
+       err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
+-                                     &flowtable_hook, flowtable, false);
++                                     &flowtable_hook, flowtable, extack, false);
+       if (err < 0)
+               return err;
+@@ -8039,7 +8049,7 @@ static int nf_tables_delflowtable(struct sk_buff *skb,
+       nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+       if (nla[NFTA_FLOWTABLE_HOOK])
+-              return nft_delflowtable_hook(&ctx, flowtable);
++              return nft_delflowtable_hook(&ctx, flowtable, extack);
+       if (flowtable->use > 0) {
+               NL_SET_BAD_ATTR(extack, attr);
+-- 
+2.39.2
+
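The technique in the patch above is to thread the request's netlink_ext_ack
pointer from the top-level nf_tables handlers down into the parsing helpers
(nft_chain_parse_netdev() and nft_flowtable_parse_hook()), so that a
validation failure can flag the exact offending attribute with
NL_SET_BAD_ATTR() instead of returning a bare error code. What follows is
only a minimal user-space sketch of that idea; mock_attr, mock_ext_ack and
set_bad_attr() are invented stand-ins, not kernel APIs.

/* Sketch: pass an "extended ack" down so the caller learns which attribute failed. */
#include <stdio.h>

struct mock_attr { int type; const char *name; };
struct mock_ext_ack { const struct mock_attr *bad_attr; };

/* Stand-in for NL_SET_BAD_ATTR(): remember the rejected attribute. */
static void set_bad_attr(struct mock_ext_ack *ack, const struct mock_attr *attr)
{
        if (ack)
                ack->bad_attr = attr;
}

/* Parsing helper: receives the ext-ack pointer its caller was given. */
static int parse_hook_dev(const struct mock_attr *dev_attr, struct mock_ext_ack *ack)
{
        if (!dev_attr->name) {
                set_bad_attr(ack, dev_attr);    /* pinpoint the bad attribute */
                return -1;                      /* the kernel helpers return -errno here */
        }
        printf("hook bound to device %s\n", dev_attr->name);
        return 0;
}

int main(void)
{
        struct mock_attr dev = { .type = 1, .name = NULL };    /* invalid on purpose */
        struct mock_ext_ack ack = { 0 };

        if (parse_hook_dev(&dev, &ack) < 0 && ack.bad_attr)
                fprintf(stderr, "rejected attribute of type %d\n", ack.bad_attr->type);
        return 0;
}

In the kernel the top-level handlers already receive extack from the netlink
core; the patch only forwards that pointer, and the core then reports the
offending attribute's offset back to user space.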
diff --git a/queue-6.2/netfilter-nf_tables-hit-enoent-on-unexisting-chain-f.patch b/queue-6.2/netfilter-nf_tables-hit-enoent-on-unexisting-chain-f.patch
new file mode 100644 (file)
index 0000000..21c288f
--- /dev/null
@@ -0,0 +1,107 @@
+From 3c8643218ee83e553d534dc1aa466d6426e93026 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Apr 2023 16:50:32 +0200
+Subject: netfilter: nf_tables: hit ENOENT on unexisting chain/flowtable update
+ with missing attributes
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 8509f62b0b07ae8d6dec5aa9613ab1b250ff632f ]
+
+If the user does not specify a hook number and priority, assume this is
+a chain/flowtable update and report ENOENT, which provides a better
+hint than EINVAL. Also set the extended netlink error report to refer
+to the chain name.
+
+Fixes: 5b6743fb2c2a ("netfilter: nf_tables: skip flowtable hooknum and priority on device updates")
+Fixes: 5efe72698a97 ("netfilter: nf_tables: support for adding new devices to an existing netdev chain")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4ffafef46d2e2..d64478af0129f 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2053,8 +2053,10 @@ static int nft_chain_parse_hook(struct net *net,
+               return err;
+       if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
+-          ha[NFTA_HOOK_PRIORITY] == NULL)
+-              return -EINVAL;
++          ha[NFTA_HOOK_PRIORITY] == NULL) {
++              NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
++              return -ENOENT;
++      }
+       hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+       hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
+@@ -7556,7 +7558,7 @@ static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX
+ };
+ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+-                                  const struct nlattr *attr,
++                                  const struct nlattr * const nla[],
+                                   struct nft_flowtable_hook *flowtable_hook,
+                                   struct nft_flowtable *flowtable,
+                                   struct netlink_ext_ack *extack, bool add)
+@@ -7568,15 +7570,18 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+       INIT_LIST_HEAD(&flowtable_hook->list);
+-      err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
++      err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX,
++                                        nla[NFTA_FLOWTABLE_HOOK],
+                                         nft_flowtable_hook_policy, NULL);
+       if (err < 0)
+               return err;
+       if (add) {
+               if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
+-                  !tb[NFTA_FLOWTABLE_HOOK_PRIORITY])
+-                      return -EINVAL;
++                  !tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) {
++                      NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
++                      return -ENOENT;
++              }
+               hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
+               if (hooknum != NF_NETDEV_INGRESS)
+@@ -7761,8 +7766,8 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+       u32 flags;
+       int err;
+-      err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
+-                                     &flowtable_hook, flowtable, extack, false);
++      err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
++                                     extack, false);
+       if (err < 0)
+               return err;
+@@ -7907,8 +7912,8 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+       if (err < 0)
+               goto err3;
+-      err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+-                                     &flowtable_hook, flowtable, extack, true);
++      err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
++                                     extack, true);
+       if (err < 0)
+               goto err4;
+@@ -7970,8 +7975,8 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+       struct nft_trans *trans;
+       int err;
+-      err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
+-                                     &flowtable_hook, flowtable, extack, false);
++      err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
++                                     extack, false);
+       if (err < 0)
+               return err;
+-- 
+2.39.2
+
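The errno choice above can be read as follows: a request that omits the hook
number and priority is only meaningful as an update of an existing chain or
flowtable, so when it lands on the create path the object it meant to update
evidently does not exist, and ENOENT with the name attribute flagged is a
better hint than EINVAL. A rough stand-alone illustration of that decision,
using invented types rather than the real nf_tables structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
        bool has_hook_num;      /* creation-only attribute present?  */
        bool has_hook_priority; /* creation-only attribute present?  */
        const char *name;       /* name of the object being targeted */
};

/* add == true means we are on the "create a new object" path. */
static int parse_hook(const struct request *req, bool add, const char **bad_attr)
{
        if (add && (!req->has_hook_num || !req->has_hook_priority)) {
                *bad_attr = req->name;  /* hint: the named object was not found */
                return -ENOENT;         /* update requested, nothing to update  */
        }
        return 0;
}

int main(void)
{
        struct request req = { .name = "ft0" }; /* no hook num/priority supplied */
        const char *bad = NULL;

        if (parse_hook(&req, true, &bad) == -ENOENT)
                fprintf(stderr, "'%s' does not exist\n", bad);
        return 0;
}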
diff --git a/queue-6.2/netfilter-nf_tables-rename-function-to-destroy-hook-.patch b/queue-6.2/netfilter-nf_tables-rename-function-to-destroy-hook-.patch
new file mode 100644 (file)
index 0000000..a008d4b
--- /dev/null
@@ -0,0 +1,62 @@
+From 0e0fb358ea78ad0a5724eaf3ac2fc484af7e091c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Apr 2023 00:34:30 +0200
+Subject: netfilter: nf_tables: rename function to destroy hook list
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit cdc32546632354305afdcf399a5431138a31c9e0 ]
+
+Rename nft_flowtable_hooks_destroy() to nft_hooks_destroy() to prepare
+for netdev chain device updates.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 8509f62b0b07 ("netfilter: nf_tables: hit ENOENT on unexisting chain/flowtable update with missing attributes")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4b0a84a39b19e..4ffafef46d2e2 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7739,7 +7739,7 @@ static int nft_register_flowtable_net_hooks(struct net *net,
+       return err;
+ }
+-static void nft_flowtable_hooks_destroy(struct list_head *hook_list)
++static void nft_hooks_destroy(struct list_head *hook_list)
+ {
+       struct nft_hook *hook, *next;
+@@ -7920,7 +7920,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+                                              &flowtable->hook_list,
+                                              flowtable);
+       if (err < 0) {
+-              nft_flowtable_hooks_destroy(&flowtable->hook_list);
++              nft_hooks_destroy(&flowtable->hook_list);
+               goto err4;
+       }
+@@ -8695,7 +8695,7 @@ static void nft_commit_release(struct nft_trans *trans)
+               break;
+       case NFT_MSG_DELFLOWTABLE:
+               if (nft_trans_flowtable_update(trans))
+-                      nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
++                      nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+               else
+                       nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+               break;
+@@ -9341,7 +9341,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+               break;
+       case NFT_MSG_NEWFLOWTABLE:
+               if (nft_trans_flowtable_update(trans))
+-                      nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
++                      nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+               else
+                       nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+               break;
+-- 
+2.39.2
+
diff --git a/queue-6.2/series b/queue-6.2/series
index 5aa1ecf584bed13914648d8622cbff908a11dbec..f390b163e501525f50b24cdccb9eaf99ec45eb1f 100644 (file)
@@ -109,3 +109,19 @@ crypto-engine-fix-crypto_queue-backlog-handling.patch
 perf-symbols-fix-return-incorrect-build_id-size-in-e.patch
 perf-tracepoint-fix-memory-leak-in-is_valid_tracepoi.patch
 perf-stat-separate-bperf-from-bpf_profiler.patch
+kvm-x86-mmu-change-tdp_mmu-to-a-read-only-parameter.patch
+kvm-x86-mmu-move-tdp-mmu-vm-init-uninit-behind-tdp_m.patch
+kvm-x86-mmu-replace-open-coded-usage-of-tdp_mmu_page.patch
+kvm-x86-preserve-tdp-mmu-roots-until-they-are-explic.patch
+ksmbd-implements-sess-ksmbd_chann_list-as-xarray.patch
+ksmbd-fix-racy-issue-from-session-setup-and-logoff.patch
+ksmbd-block-asynchronous-requests-when-making-a-dela.patch
+ksmbd-destroy-expired-sessions.patch
+ksmbd-fix-racy-issue-from-smb2-close-and-logoff-with.patch
+wifi-iwlwifi-mvm-fix-potential-memory-leak.patch
+dmaengine-at_xdmac-restore-the-content-of-grws-regis.patch
+cifs-check-only-tcon-status-on-tcon-related-function.patch
+cifs-avoid-potential-races-when-handling-multiple-df.patch
+netfilter-nf_tables-extended-netlink-error-reporting.patch
+netfilter-nf_tables-rename-function-to-destroy-hook-.patch
+netfilter-nf_tables-hit-enoent-on-unexisting-chain-f.patch
diff --git a/queue-6.2/wifi-iwlwifi-mvm-fix-potential-memory-leak.patch b/queue-6.2/wifi-iwlwifi-mvm-fix-potential-memory-leak.patch
new file mode 100644 (file)
index 0000000..3eba233
--- /dev/null
@@ -0,0 +1,38 @@
+From 3a4cd8addf3b258451e56a3775f844c6e643b990 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Apr 2023 12:28:08 +0300
+Subject: wifi: iwlwifi: mvm: fix potential memory leak
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 457d7fb03e6c3d73fbb509bd85fc4b02d1ab405e ]
+
+If we get multiple notifications from the firmware, we might
+have already allocated 'notif' but never free it. Fix that by
+checking for duplicates before the allocation.
+
+Fixes: 4da46a06d443 ("wifi: iwlwifi: mvm: Add support for wowlan info notification")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20230418122405.116758321cc4.I8bdbcbb38c89ac637eaa20dda58fa9165b25893a@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 29f75948ab00c..fe2de813fbf49 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2715,6 +2715,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+                       break;
+               }
++
+               d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+               len = iwl_rx_packet_payload_len(pkt);
+               iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status,
+-- 
+2.39.2
+
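The commit message above describes the fix as checking for a duplicate
notification before allocating the copy, so a repeated wowlan_info
notification never allocates a buffer that the early exit would then leak.
A stand-alone sketch of that ordering, with invented names rather than the
real iwlwifi structures:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NOTIF_WOWLAN_INFO 0x1

struct d3_data { unsigned int notif_received; };

static bool handle_wowlan_info(struct d3_data *d3, const void *pkt, size_t len)
{
        void *notif;

        /* Duplicate notification: bail out before any allocation. */
        if (d3->notif_received & NOTIF_WOWLAN_INFO) {
                fprintf(stderr, "ignoring repeated wowlan_info notification\n");
                return false;
        }

        notif = malloc(len);            /* copy of the firmware payload */
        if (!notif)
                return false;
        memcpy(notif, pkt, len);

        d3->notif_received |= NOTIF_WOWLAN_INFO;
        /* ... parse the notification ... */
        free(notif);
        return true;
}

int main(void)
{
        struct d3_data d3 = { 0 };
        char pkt[16] = "wowlan-info";

        handle_wowlan_info(&d3, pkt, sizeof(pkt));      /* first copy is processed */
        handle_wowlan_info(&d3, pkt, sizeof(pkt));      /* duplicate is ignored    */
        return 0;
}

The general rule illustrated here: when a handler has an "already seen this"
early return, place that check ahead of any allocation, or ensure every
return path after the allocation frees it.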