--- /dev/null
+From 6b227295b4c6294efce1a5ec1b1a050ebbbb23ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Sep 2023 17:18:46 +0200
+Subject: riscv: fix set_huge_pte_at() for NAPOT mappings when a swap entry is
+ set
+
+From: Alexandre Ghiti <alexghiti@rivosinc.com>
+
+[ Upstream commit 1de195dd0e05d9cba43dec16f83d4ee32af94dd2 ]
+
+We used to determine the number of page table entries to set for a NAPOT
+hugepage from the pte value, which fails when the pte to set is a swap
+entry, since a swap pte does not encode a valid NAPOT order.
+
+So take advantage of a recent fix for arm64 reported in [1], which
+introduces the size of the mapping as an argument of set_huge_pte_at():
+we can then use this size to compute the number of page table entries to
+set for a NAPOT region.
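+
+As a userspace sketch (illustrative, not the kernel code; shift values
+assume riscv with 4K base pages, P4D/PGDIR levels omitted for brevity),
+the entry count is now derived from the mapping size alone, so it stays
+correct even when the pte holds a swap entry:
+
+  #include <stdio.h>
+
+  static unsigned long hugepage_shift(unsigned long sz)
+  {
+          if (sz >= (1UL << 30))
+                  return 30;      /* PUD_SHIFT */
+          if (sz >= (1UL << 21))
+                  return 21;      /* PMD_SHIFT */
+          return 12;              /* PAGE_SHIFT */
+  }
+
+  int main(void)
+  {
+          unsigned long sz = 64 * 1024;   /* 64K NAPOT mapping */
+
+          /* 16 entries, no matter what the pte bits contain */
+          printf("pte_num = %lu\n", sz >> hugepage_shift(sz));
+          return 0;
+  }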
+
+Link: https://lkml.kernel.org/r/20230928151846.8229-3-alexghiti@rivosinc.com
+Fixes: 82a1a1f3bfb6 ("riscv: mm: support Svnapot in hugetlb page")
+Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Reported-by: Ryan Roberts <ryan.roberts@arm.com>
+Closes: https://lore.kernel.org/linux-arm-kernel/20230922115804.2043771-1-ryan.roberts@arm.com/ [1]
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: Palmer Dabbelt <palmer@dabbelt.com>
+Cc: Paul Walmsley <paul.walmsley@sifive.com>
+Cc: Qinglin Pan <panqinglin2020@iscas.ac.cn>
+Cc: Conor Dooley <conor@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/mm/hugetlbpage.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
+index 96225a8533ad8..e92e89461c3bc 100644
+--- a/arch/riscv/mm/hugetlbpage.c
++++ b/arch/riscv/mm/hugetlbpage.c
+@@ -182,15 +182,22 @@ void set_huge_pte_at(struct mm_struct *mm,
+ pte_t *ptep,
+ pte_t pte)
+ {
++ unsigned long hugepage_shift;
+ int i, pte_num;
+
+- if (!pte_napot(pte)) {
+- set_pte_at(mm, addr, ptep, pte);
+- return;
+- }
++ if (sz >= PGDIR_SIZE)
++ hugepage_shift = PGDIR_SHIFT;
++ else if (sz >= P4D_SIZE)
++ hugepage_shift = P4D_SHIFT;
++ else if (sz >= PUD_SIZE)
++ hugepage_shift = PUD_SHIFT;
++ else if (sz >= PMD_SIZE)
++ hugepage_shift = PMD_SHIFT;
++ else
++ hugepage_shift = PAGE_SHIFT;
+
+- pte_num = napot_pte_num(napot_cont_order(pte));
+- for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
++ pte_num = sz >> hugepage_shift;
++ for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
+ set_pte_at(mm, addr, ptep, pte);
+ }
+
+--
+2.42.0
+
--- /dev/null
+riscv-fix-set_huge_pte_at-for-napot-mappings-when-a-.patch
+vdpa-mlx5-fix-firmware-error-on-creation-of-1k-vqs.patch
+smb3-allow-controlling-length-of-time-directory-entr.patch
+smb3-allow-controlling-maximum-number-of-cached-dire.patch
+smb3-do-not-start-laundromat-thread-when-dir-leases.patch
+smb-client-do-not-start-laundromat-thread-on-nohandl.patch
+smb-client-make-laundromat-a-delayed-worker.patch
+smb-client-prevent-new-fids-from-being-removed-by-la.patch
--- /dev/null
+From ca3c9d2d3d14b0cb9e15baf154bb5025b555c4be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Oct 2023 17:28:38 -0300
+Subject: smb: client: do not start laundromat thread on nohandlecache
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit 3b8bb3171571f92eda863e5f78b063604c61f72a ]
+
+Honor the 'nohandlecache' mount option by not starting the laundromat
+thread even when the SMB server supports directory leases. Do not waste
+system resources by keeping the laundromat thread running when no
+directory caching is done at all.
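+
+The resulting decision, as a sketch (dir_leases_supported is a
+placeholder for the dialect/capability check in the hunk below):
+
+  bool nohandlecache = !dir_leases_supported || ctx->nohandlecache;
+  struct cifs_tcon *tcon = tcon_info_alloc(!nohandlecache);
+
+  if (tcon)       /* remember the effective setting on the tcon */
+          tcon->nohandlecache = nohandlecache;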
+
+Fixes: 2da338ff752a ("smb3: do not start laundromat thread when dir leases disabled")
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/connect.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index e70203d07d5d1..bd33661dcb57f 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2474,8 +2474,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+ static struct cifs_tcon *
+ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ {
+- int rc, xid;
+ struct cifs_tcon *tcon;
++ bool nohandlecache;
++ int rc, xid;
+
+ tcon = cifs_find_tcon(ses, ctx);
+ if (tcon) {
+@@ -2493,14 +2494,17 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ goto out_fail;
+ }
+
+- if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+- tcon = tcon_info_alloc(true);
++ if (ses->server->dialect >= SMB20_PROT_ID &&
++ (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
++ nohandlecache = ctx->nohandlecache;
+ else
+- tcon = tcon_info_alloc(false);
++ nohandlecache = true;
++ tcon = tcon_info_alloc(!nohandlecache);
+ if (tcon == NULL) {
+ rc = -ENOMEM;
+ goto out_fail;
+ }
++ tcon->nohandlecache = nohandlecache;
+
+ if (ctx->snapshot_time) {
+ if (ses->server->vals->protocol_id == 0) {
+@@ -2662,10 +2666,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ tcon->nocase = ctx->nocase;
+ tcon->broken_sparse_sup = ctx->no_sparse;
+ tcon->max_cached_dirs = ctx->max_cached_dirs;
+- if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+- tcon->nohandlecache = ctx->nohandlecache;
+- else
+- tcon->nohandlecache = true;
+ tcon->nodelete = ctx->nodelete;
+ tcon->local_lease = ctx->local_lease;
+ INIT_LIST_HEAD(&tcon->pending_opens);
+--
+2.42.0
+
--- /dev/null
+From d292c9229a94395ae7ac80ac76a168fe41fbe7c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Oct 2023 16:04:25 -0300
+Subject: smb: client: make laundromat a delayed worker
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit e95f3f74465072c2545d8e65a3c3a96e37129cf8 ]
+
+Having the laundromat kthread process cached directories every second
+turned out to be overkill, especially with multiple SMB mounts.
+
+Relax it by instead using a delayed worker that gets scheduled every
+@dir_cache_timeout (default=30) seconds per tcon.
+
+This also fixes the 1s delay when tearing down the tcon.
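+
+A minimal sketch of the self-arming delayed-work pattern adopted here
+(illustrative names; the real worker is cfids_laundromat_worker() and
+queues on cifsiod_wq):
+
+  #include <linux/workqueue.h>
+
+  static unsigned int dir_cache_timeout = 30;     /* seconds */
+
+  static void laundromat_worker(struct work_struct *work)
+  {
+          struct delayed_work *dwork = to_delayed_work(work);
+
+          /* ... scan and expire stale cached directories ... */
+
+          /* re-arm: the scan repeats every dir_cache_timeout seconds */
+          queue_delayed_work(system_wq, dwork, dir_cache_timeout * HZ);
+  }
+
+  /* armed once at init via queue_delayed_work() and stopped with
+   * cancel_delayed_work_sync(); no dedicated kthread required */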
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cached_dir.c | 89 +++++++++++++++-----------------------
+ fs/smb/client/cached_dir.h | 2 +-
+ 2 files changed, 36 insertions(+), 55 deletions(-)
+
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index e2be8aedb26e3..a9e5d3b7e9a05 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -15,6 +15,7 @@
+ static struct cached_fid *init_cached_dir(const char *path);
+ static void free_cached_dir(struct cached_fid *cfid);
+ static void smb2_close_cached_fid(struct kref *ref);
++static void cfids_laundromat_worker(struct work_struct *work);
+
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ const char *path,
+@@ -572,53 +573,46 @@ static void free_cached_dir(struct cached_fid *cfid)
+ kfree(cfid);
+ }
+
+-static int
+-cifs_cfids_laundromat_thread(void *p)
++static void cfids_laundromat_worker(struct work_struct *work)
+ {
+- struct cached_fids *cfids = p;
++ struct cached_fids *cfids;
+ struct cached_fid *cfid, *q;
+- struct list_head entry;
++ LIST_HEAD(entry);
+
+- while (!kthread_should_stop()) {
+- ssleep(1);
+- INIT_LIST_HEAD(&entry);
+- if (kthread_should_stop())
+- return 0;
+- spin_lock(&cfids->cfid_list_lock);
+- list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+- if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
+- list_del(&cfid->entry);
+- list_add(&cfid->entry, &entry);
+- cfids->num_entries--;
+- }
++ cfids = container_of(work, struct cached_fids, laundromat_work.work);
++
++ spin_lock(&cfids->cfid_list_lock);
++ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
++ if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
++ list_move(&cfid->entry, &entry);
++ cfids->num_entries--;
+ }
+- spin_unlock(&cfids->cfid_list_lock);
++ }
++ spin_unlock(&cfids->cfid_list_lock);
+
+- list_for_each_entry_safe(cfid, q, &entry, entry) {
+- cfid->on_list = false;
+- list_del(&cfid->entry);
++ list_for_each_entry_safe(cfid, q, &entry, entry) {
++ cfid->on_list = false;
++ list_del(&cfid->entry);
++ /*
++ * Cancel and wait for the work to finish in case we are racing
++ * with it.
++ */
++ cancel_work_sync(&cfid->lease_break);
++ if (cfid->has_lease) {
+ /*
+- * Cancel, and wait for the work to finish in
+- * case we are racing with it.
++ * Our lease has not yet been cancelled from the server
++ * so we need to drop the reference.
+ */
+- cancel_work_sync(&cfid->lease_break);
+- if (cfid->has_lease) {
+- /*
+- * We lease has not yet been cancelled from
+- * the server so we need to drop the reference.
+- */
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->has_lease = false;
+- spin_unlock(&cfids->cfid_list_lock);
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
++ spin_lock(&cfids->cfid_list_lock);
++ cfid->has_lease = false;
++ spin_unlock(&cfids->cfid_list_lock);
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+ }
+-
+- return 0;
++ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++ dir_cache_timeout * HZ);
+ }
+
+-
+ struct cached_fids *init_cached_dirs(void)
+ {
+ struct cached_fids *cfids;
+@@ -629,19 +623,10 @@ struct cached_fids *init_cached_dirs(void)
+ spin_lock_init(&cfids->cfid_list_lock);
+ INIT_LIST_HEAD(&cfids->entries);
+
+- /*
+- * since we're in a cifs function already, we know that
+- * this will succeed. No need for try_module_get().
+- */
+- __module_get(THIS_MODULE);
+- cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
+- cfids, "cifsd-cfid-laundromat");
+- if (IS_ERR(cfids->laundromat)) {
+- cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
+- kfree(cfids);
+- module_put(THIS_MODULE);
+- return NULL;
+- }
++ INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
++ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++ dir_cache_timeout * HZ);
++
+ return cfids;
+ }
+
+@@ -657,11 +642,7 @@ void free_cached_dirs(struct cached_fids *cfids)
+ if (cfids == NULL)
+ return;
+
+- if (cfids->laundromat) {
+- kthread_stop(cfids->laundromat);
+- cfids->laundromat = NULL;
+- module_put(THIS_MODULE);
+- }
++ cancel_delayed_work_sync(&cfids->laundromat_work);
+
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
+index a82ff2cea789c..81ba0fd5cc16d 100644
+--- a/fs/smb/client/cached_dir.h
++++ b/fs/smb/client/cached_dir.h
+@@ -57,7 +57,7 @@ struct cached_fids {
+ spinlock_t cfid_list_lock;
+ int num_entries;
+ struct list_head entries;
+- struct task_struct *laundromat;
++ struct delayed_work laundromat_work;
+ };
+
+ extern struct cached_fids *init_cached_dirs(void);
+--
+2.42.0
+
--- /dev/null
+From edbe618f8c8c2164bfc78c34bec098e34de2c2ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Oct 2023 17:37:40 -0300
+Subject: smb: client: prevent new fids from being removed by laundromat
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+[ Upstream commit 81ba10959970d15c388bf29866b01b62f387e6a3 ]
+
+Check if @cfid->time is set in the laundromat so we guarantee that only
+fully cached fids will be selected for removal. While we're at it, add
+missing locks to protect access to @cfid fields in order to avoid races
+with open_cached_dir() and cfids_laundromat_worker().
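+
+The pinning pattern, as a sketch (expired() is a hypothetical helper
+standing in for the jiffies comparison; see the hunks below for the
+real code):
+
+  spin_lock(&cfids->cfid_list_lock);
+  list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+          if (cfid->time && expired(cfid)) { /* skip half-built fids */
+                  cfid->on_list = false;
+                  list_move(&cfid->entry, &entry);
+                  cfids->num_entries--;
+                  kref_get(&cfid->refcount); /* pin across unlock */
+          }
+  }
+  spin_unlock(&cfids->cfid_list_lock);
+  /* process the private list outside the lock, then drop the extra
+   * reference with kref_put() */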
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cached_dir.c | 56 ++++++++++++++++++++++++--------------
+ 1 file changed, 35 insertions(+), 21 deletions(-)
+
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index a9e5d3b7e9a05..fe1bf5b6e0cb3 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -170,15 +170,18 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ return -ENOENT;
+ }
+ /*
+- * At this point we either have a lease already and we can just
+- * return it. If not we are guaranteed to be the only thread accessing
+- * this cfid.
++ * Return cached fid if it has a lease. Otherwise, it is either a new
++ * entry or laundromat worker removed it from @cfids->entries. Caller
++ * will put last reference if the latter.
+ */
++ spin_lock(&cfids->cfid_list_lock);
+ if (cfid->has_lease) {
++ spin_unlock(&cfids->cfid_list_lock);
+ *ret_cfid = cfid;
+ kfree(utf16_path);
+ return 0;
+ }
++ spin_unlock(&cfids->cfid_list_lock);
+
+ /*
+ * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
+@@ -295,9 +298,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ goto oshr_free;
+ }
+ }
++ spin_lock(&cfids->cfid_list_lock);
+ cfid->dentry = dentry;
+ cfid->time = jiffies;
+ cfid->has_lease = true;
++ spin_unlock(&cfids->cfid_list_lock);
+
+ oshr_free:
+ kfree(utf16_path);
+@@ -306,24 +311,28 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ spin_lock(&cfids->cfid_list_lock);
+- if (rc && !cfid->has_lease) {
+- if (cfid->on_list) {
+- list_del(&cfid->entry);
+- cfid->on_list = false;
+- cfids->num_entries--;
++ if (!cfid->has_lease) {
++ if (rc) {
++ if (cfid->on_list) {
++ list_del(&cfid->entry);
++ cfid->on_list = false;
++ cfids->num_entries--;
++ }
++ rc = -ENOENT;
++ } else {
++ /*
++ * We are guaranteed to have two references at this
++ * point. One for the caller and one for a potential
++ * lease. Release the Lease-ref so that the directory
++ * will be closed when the caller closes the cached
++ * handle.
++ */
++ spin_unlock(&cfids->cfid_list_lock);
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
++ goto out;
+ }
+- rc = -ENOENT;
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+- if (!rc && !cfid->has_lease) {
+- /*
+- * We are guaranteed to have two references at this point.
+- * One for the caller and one for a potential lease.
+- * Release the Lease-ref so that the directory will be closed
+- * when the caller closes the cached handle.
+- */
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
+ if (rc) {
+ if (cfid->is_open)
+ SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+@@ -331,7 +340,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ free_cached_dir(cfid);
+ cfid = NULL;
+ }
+-
++out:
+ if (rc == 0) {
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+@@ -583,15 +592,18 @@ static void cfids_laundromat_worker(struct work_struct *work)
+
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+- if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
++ if (cfid->time &&
++ time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
++ cfid->on_list = false;
+ list_move(&cfid->entry, &entry);
+ cfids->num_entries--;
++ /* To prevent race with smb2_cached_lease_break() */
++ kref_get(&cfid->refcount);
+ }
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+- cfid->on_list = false;
+ list_del(&cfid->entry);
+ /*
+ * Cancel and wait for the work to finish in case we are racing
+@@ -608,6 +620,8 @@ static void cfids_laundromat_worker(struct work_struct *work)
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
++ /* Drop the extra reference opened above */
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
+--
+2.42.0
+
--- /dev/null
+From 7030e469508b99343ca8b27f4acf8aeeb7d51fc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Aug 2023 22:48:41 -0500
+Subject: smb3: allow controlling length of time directory entries are cached
+ with dir leases
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit 238b351d0935df568ecb3dc5aef25971778f0f7c ]
+
+Currently, with directory leases, we cache directory contents for a fixed
+period of time (default 30 seconds), but for many workloads this is too
+short. Allow configuring the maximum amount of time directory entries are
+cached when a directory lease is held on that directory. Add the module
+load parm "dir_cache_timeout".
+
+For example, to set the timeout to 10 minutes you would do:
+
+ echo 600 > /sys/module/cifs/parameters/dir_cache_timeout
+
+or to disable caching directory contents:
+
+ echo 0 > /sys/module/cifs/parameters/dir_cache_timeout
+
+Reviewed-by: Bharath SM <bharathsm@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cached_dir.c | 4 ++--
+ fs/smb/client/cifsfs.c | 10 ++++++++++
+ fs/smb/client/cifsglob.h | 1 +
+ 3 files changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 2d5e9a9d5b8be..9d84c4a7bd0ce 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -145,7 +145,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ const char *npath;
+
+ if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
+- is_smb1_server(tcon->ses->server))
++ is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
+ return -EOPNOTSUPP;
+
+ ses = tcon->ses;
+@@ -582,7 +582,7 @@ cifs_cfids_laundromat_thread(void *p)
+ return 0;
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+- if (time_after(jiffies, cfid->time + HZ * 30)) {
++ if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
+ list_del(&cfid->entry);
+ list_add(&cfid->entry, &entry);
+ cfids->num_entries--;
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index a4d8b0ea1c8cb..9a6d7e66408d1 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -117,6 +117,10 @@ module_param(cifs_max_pending, uint, 0444);
+ MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
+ "CIFS/SMB1 dialect (N/A for SMB3) "
+ "Default: 32767 Range: 2 to 32767.");
++unsigned int dir_cache_timeout = 30;
++module_param(dir_cache_timeout, uint, 0644);
++MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
++ "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
+ #ifdef CONFIG_CIFS_STATS2
+ unsigned int slow_rsp_threshold = 1;
+ module_param(slow_rsp_threshold, uint, 0644);
+@@ -1679,6 +1683,12 @@ init_cifs(void)
+ CIFS_MAX_REQ);
+ }
+
++ /* Limit max to about 18 hours, and setting to zero disables directory entry caching */
++ if (dir_cache_timeout > 65000) {
++ dir_cache_timeout = 65000;
++ cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
++ }
++
+ cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!cifsiod_wq) {
+ rc = -ENOMEM;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 35782a6bede0b..f8eb787ecffab 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1987,6 +1987,7 @@ extern unsigned int CIFSMaxBufSize; /* max size not including hdr */
+ extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
+ extern unsigned int cifs_min_small; /* min size of small buf pool */
+ extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
++extern unsigned int dir_cache_timeout; /* max time for directory lease caching of dir */
+ extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */
+ extern atomic_t mid_count;
+
+--
+2.42.0
+
--- /dev/null
+From 2d238af1cd45ff4fdd3b7ef161f41f686bc02756 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Sep 2023 02:15:14 -0500
+Subject: smb3: allow controlling maximum number of cached directories
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit 6a50d71d0ffff6791737eb502b27f74fb87d0cae ]
+
+Allow adjusting the maximum number of cached directories per share
+(defaults to 16) via the mount parm "max_cached_dirs".
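+
+For example (illustrative server, share and credentials), to allow up
+to 32 cached directories on a mount:
+
+  mount -t cifs //server/share /mnt -o username=user,max_cached_dirs=32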
+
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cached_dir.c | 7 ++++---
+ fs/smb/client/cached_dir.h | 2 +-
+ fs/smb/client/cifsfs.c | 2 ++
+ fs/smb/client/cifsglob.h | 1 +
+ fs/smb/client/connect.c | 1 +
+ fs/smb/client/fs_context.c | 11 ++++++++++-
+ fs/smb/client/fs_context.h | 4 +++-
+ 7 files changed, 22 insertions(+), 6 deletions(-)
+
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index 9d84c4a7bd0ce..b17f067e4ada0 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -18,7 +18,8 @@ static void smb2_close_cached_fid(struct kref *ref);
+
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ const char *path,
+- bool lookup_only)
++ bool lookup_only,
++ __u32 max_cached_dirs)
+ {
+ struct cached_fid *cfid;
+
+@@ -43,7 +44,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+- if (cfids->num_entries >= MAX_CACHED_FIDS) {
++ if (cfids->num_entries >= max_cached_dirs) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+@@ -162,7 +163,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ if (!utf16_path)
+ return -ENOMEM;
+
+- cfid = find_or_create_cached_dir(cfids, path, lookup_only);
++ cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
+ if (cfid == NULL) {
+ kfree(utf16_path);
+ return -ENOENT;
+diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
+index facc9b154d009..a82ff2cea789c 100644
+--- a/fs/smb/client/cached_dir.h
++++ b/fs/smb/client/cached_dir.h
+@@ -49,7 +49,7 @@ struct cached_fid {
+ struct cached_dirents dirents;
+ };
+
+-#define MAX_CACHED_FIDS 16
++/* default MAX_CACHED_FIDS is 16 */
+ struct cached_fids {
+ /* Must be held when:
+ * - accessing the cfids->entries list
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 9a6d7e66408d1..e19df244ea7ea 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -699,6 +699,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+ if (tcon->handle_timeout)
+ seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
++ if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
++ seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
+
+ /*
+ * Display file and directory attribute timeout in seconds.
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index f8eb787ecffab..b4c1c4742f08a 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1192,6 +1192,7 @@ struct cifs_tcon {
+ __u32 max_chunks;
+ __u32 max_bytes_chunk;
+ __u32 max_bytes_copy;
++ __u32 max_cached_dirs;
+ #ifdef CONFIG_CIFS_FSCACHE
+ u64 resource_id; /* server resource id */
+ struct fscache_volume *fscache; /* cookie for share */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 352e251c41132..f00d02608ee46 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -2657,6 +2657,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ tcon->retry = ctx->retry;
+ tcon->nocase = ctx->nocase;
+ tcon->broken_sparse_sup = ctx->no_sparse;
++ tcon->max_cached_dirs = ctx->max_cached_dirs;
+ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+ tcon->nohandlecache = ctx->nohandlecache;
+ else
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index f12203c49b802..a3493da12ad1e 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -150,6 +150,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
+ fsparam_u32("closetimeo", Opt_closetimeo),
+ fsparam_u32("echo_interval", Opt_echo_interval),
+ fsparam_u32("max_credits", Opt_max_credits),
++ fsparam_u32("max_cached_dirs", Opt_max_cached_dirs),
+ fsparam_u32("handletimeout", Opt_handletimeout),
+ fsparam_u64("snapshot", Opt_snapshot),
+ fsparam_u32("max_channels", Opt_max_channels),
+@@ -1165,6 +1166,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ if (result.uint_32 > 1)
+ ctx->multichannel = true;
+ break;
++ case Opt_max_cached_dirs:
++ if (result.uint_32 < 1) {
++ cifs_errorf(fc, "%s: Invalid max_cached_dirs, needs to be 1 or more\n",
++ __func__);
++ goto cifs_parse_mount_err;
++ }
++ ctx->max_cached_dirs = result.uint_32;
++ break;
+ case Opt_handletimeout:
+ ctx->handle_timeout = result.uint_32;
+ if (ctx->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+@@ -1593,7 +1602,7 @@ int smb3_init_fs_context(struct fs_context *fc)
+ ctx->acregmax = CIFS_DEF_ACTIMEO;
+ ctx->acdirmax = CIFS_DEF_ACTIMEO;
+ ctx->closetimeo = SMB3_DEF_DCLOSETIMEO;
+-
++ ctx->max_cached_dirs = MAX_CACHED_FIDS;
+ /* Most clients set timeout to 0, allows server to use its default */
+ ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index f4eaf85589022..9d8d34af02114 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -128,6 +128,7 @@ enum cifs_param {
+ Opt_closetimeo,
+ Opt_echo_interval,
+ Opt_max_credits,
++ Opt_max_cached_dirs,
+ Opt_snapshot,
+ Opt_max_channels,
+ Opt_handletimeout,
+@@ -261,6 +262,7 @@ struct smb3_fs_context {
+ __u32 handle_timeout; /* persistent and durable handle timeout in ms */
+ unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
+ unsigned int max_channels;
++ unsigned int max_cached_dirs;
+ __u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
+ bool rootfs:1; /* if it's a SMB root file system */
+ bool witness:1; /* use witness protocol */
+@@ -287,7 +289,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+ */
+ #define SMB3_MAX_DCLOSETIMEO (1 << 30)
+ #define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
+-
++#define MAX_CACHED_FIDS 16
+ extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+
+ #endif
+--
+2.42.0
+
--- /dev/null
+From 5637d1cdb5ebb083288d4cd3955bc7e64a94e1b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Sep 2023 11:35:53 -0500
+Subject: smb3: do not start laundromat thread when dir leases disabled
+
+From: Steve French <stfrench@microsoft.com>
+
+When there is no directory lease support, or for IPC shares where
+directories cannot be opened, do not start an unneeded laundromat thread
+for that mount (it wastes resources).
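+
+A sketch of how the call sites now decide (mirrors the hunks below):
+
+  /* IPC share and the dummy reconnect tcon: no directory caching */
+  tcon = tcon_info_alloc(false);
+
+  /* regular tcon: cache only when the server grants directory leases */
+  if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+          tcon = tcon_info_alloc(true);
+  else
+          tcon = tcon_info_alloc(false);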
+
+Fixes: d14de8067e3f ("cifs: Add a laundromat thread for cached directories")
+Reviewed-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Acked-by: Tom Talpey <tom@talpey.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+(cherry picked from commit 2da338ff752a2789470d733111a5241f30026675)
+---
+ fs/smb/client/cached_dir.c | 6 ++++++
+ fs/smb/client/cifsglob.h | 2 +-
+ fs/smb/client/cifsproto.h | 2 +-
+ fs/smb/client/connect.c | 8 ++++++--
+ fs/smb/client/misc.c | 14 +++++++++-----
+ fs/smb/client/smb2pdu.c | 2 +-
+ 6 files changed, 24 insertions(+), 10 deletions(-)
+
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index b17f067e4ada0..e2be8aedb26e3 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -452,6 +452,9 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
+ struct cached_fid *cfid, *q;
+ LIST_HEAD(entry);
+
++ if (cfids == NULL)
++ return;
++
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ list_move(&cfid->entry, &entry);
+@@ -651,6 +654,9 @@ void free_cached_dirs(struct cached_fids *cfids)
+ struct cached_fid *cfid, *q;
+ LIST_HEAD(entry);
+
++ if (cfids == NULL)
++ return;
++
+ if (cfids->laundromat) {
+ kthread_stop(cfids->laundromat);
+ cfids->laundromat = NULL;
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index b4c1c4742f08a..ac68fed5ad28a 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1914,7 +1914,7 @@ require use of the stronger protocol */
+ * cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
+ * ->can_cache_brlcks
+ * cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
+- * cached_fid->fid_mutex cifs_tcon->crfid tconInfoAlloc
++ * cached_fid->fid_mutex cifs_tcon->crfid tcon_info_alloc
+ * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
+ * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
+ * ->invalidHandle initiate_cifs_search
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 1d71d658e1679..bd0a1505719a4 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -513,7 +513,7 @@ extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
+
+ extern struct cifs_ses *sesInfoAlloc(void);
+ extern void sesInfoFree(struct cifs_ses *);
+-extern struct cifs_tcon *tconInfoAlloc(void);
++extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
+ extern void tconInfoFree(struct cifs_tcon *);
+
+ extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index f00d02608ee46..e70203d07d5d1 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -1882,7 +1882,8 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ }
+ }
+
+- tcon = tconInfoAlloc();
++ /* no need to setup directory caching on IPC share, so pass in false */
++ tcon = tcon_info_alloc(false);
+ if (tcon == NULL)
+ return -ENOMEM;
+
+@@ -2492,7 +2493,10 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ goto out_fail;
+ }
+
+- tcon = tconInfoAlloc();
++ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
++ tcon = tcon_info_alloc(true);
++ else
++ tcon = tcon_info_alloc(false);
+ if (tcon == NULL) {
+ rc = -ENOMEM;
+ goto out_fail;
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index d7e85d9a26553..249fac8be5a51 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -113,18 +113,22 @@ sesInfoFree(struct cifs_ses *buf_to_free)
+ }
+
+ struct cifs_tcon *
+-tconInfoAlloc(void)
++tcon_info_alloc(bool dir_leases_enabled)
+ {
+ struct cifs_tcon *ret_buf;
+
+ ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
+ if (!ret_buf)
+ return NULL;
+- ret_buf->cfids = init_cached_dirs();
+- if (!ret_buf->cfids) {
+- kfree(ret_buf);
+- return NULL;
++
++ if (dir_leases_enabled == true) {
++ ret_buf->cfids = init_cached_dirs();
++ if (!ret_buf->cfids) {
++ kfree(ret_buf);
++ return NULL;
++ }
+ }
++ /* else ret_buf->cfids is already set to NULL above */
+
+ atomic_inc(&tconInfoAllocCount);
+ ret_buf->status = TID_NEW;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 9c7e46b7e7c7a..c22cc72223814 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3871,7 +3871,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ goto done;
+
+ /* allocate a dummy tcon struct used for reconnect */
+- tcon = tconInfoAlloc();
++ tcon = tcon_info_alloc(false);
+ if (!tcon) {
+ resched = true;
+ list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+--
+2.42.0
+
--- /dev/null
+From 444ef753f11a7e076aa2fa7e9144ad7331141b7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Aug 2023 18:50:56 +0300
+Subject: vdpa/mlx5: Fix firmware error on creation of 1k VQs
+
+From: Dragos Tatulea <dtatulea@nvidia.com>
+
+[ Upstream commit abb0dcf9938c93f765abf8cb45567cadef0af6b2 ]
+
+A firmware error is triggered when configuring a 9k MTU on the PF after
+switching to switchdev mode and then using a vdpa device with larger
+(1k) rings:
+mlx5_cmd_out_err: CREATE_GENERAL_OBJECT(0xa00) op_mod(0xd) failed, status bad resource(0x5), syndrome (0xf6db90), err(-22)
+
+This is due to the fact that the hw VQ size parameters are computed
+based on the umem_1/2/3_buffer_param_a/b capabilities, and all device
+capabilities are read only once, when the driver is moved to switchdev
+mode.
+
+The problematic configuration flow looks like this:
+1) Create VF
+2) Unbind VF
+3) Switch PF to switchdev mode.
+4) Bind VF
+5) Set PF MTU to 9k
+6) create vDPA device
+7) Start VM with vDPA device and 1K queue size
+
+Note that setting the MTU before step 3) doesn't trigger this issue.
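+
+For illustration, the flow above roughly corresponds to (hypothetical
+interface names and PCI addresses):
+
+  echo 1 > /sys/class/net/p0/device/sriov_numvfs             # 1) create VF
+  echo 0000:08:00.2 > /sys/bus/pci/drivers/mlx5_core/unbind  # 2) unbind VF
+  devlink dev eswitch set pci/0000:08:00.0 mode switchdev    # 3) switchdev
+  echo 0000:08:00.2 > /sys/bus/pci/drivers/mlx5_core/bind    # 4) bind VF
+  ip link set p0 mtu 9000                                    # 5) 9k MTU on PF
+  vdpa dev add name vdpa0 mgmtdev pci/0000:08:00.2           # 6) vDPA device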
+
+This patch reads the aforementioned umem parameters at the latest point
+possible before the VQs of the device are created.
+
+v2:
+- Allocate output with kmalloc to reduce stack frame size.
+- Removed stable from cc.
+
+Fixes: 1a86b377aa21 ("vdpa/mlx5: Add VDPA driver for supported mlx5 devices")
+Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
+Message-Id: <20230831155702.1080754-1-dtatulea@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/net/mlx5_vnet.c | 63 ++++++++++++++++++++++++++-----
+ drivers/vdpa/mlx5/net/mlx5_vnet.h | 9 +++++
+ 2 files changed, 63 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 37be945a02308..a01d27b7af1b5 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
+ mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+ }
+
++static int read_umem_params(struct mlx5_vdpa_net *ndev)
++{
++ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
++ u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01);
++ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
++ int out_size;
++ void *caps;
++ void *out;
++ int err;
++
++ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
++ out = kzalloc(out_size, GFP_KERNEL);
++ if (!out)
++ return -ENOMEM;
++
++ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
++ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
++ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
++ if (err) {
++ mlx5_vdpa_warn(&ndev->mvdev,
++ "Failed reading vdpa umem capabilities with err %d\n", err);
++ goto out;
++ }
++
++ caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
++
++ ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a);
++ ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b);
++
++ ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a);
++ ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b);
++
++ ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a);
++ ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b);
++
++out:
++ kfree(out);
++ return 0;
++}
++
+ static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+ struct mlx5_vdpa_umem **umemp)
+ {
+- struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+- int p_a;
+- int p_b;
++ u32 p_a;
++ u32 p_b;
+
+ switch (num) {
+ case 1:
+- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
+- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
++ p_a = ndev->umem_1_buffer_param_a;
++ p_b = ndev->umem_1_buffer_param_b;
+ *umemp = &mvq->umem1;
+ break;
+ case 2:
+- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
+- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
++ p_a = ndev->umem_2_buffer_param_a;
++ p_b = ndev->umem_2_buffer_param_b;
+ *umemp = &mvq->umem2;
+ break;
+ case 3:
+- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
+- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
++ p_a = ndev->umem_3_buffer_param_a;
++ p_b = ndev->umem_3_buffer_param_b;
+ *umemp = &mvq->umem3;
+ break;
+ }
++
+ (*umemp)->size = p_a * mvq->num_ent + p_b;
+ }
+
+@@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
+ goto out;
+ }
+ mlx5_vdpa_add_debugfs(ndev);
++
++ err = read_umem_params(ndev);
++ if (err)
++ goto err_setup;
++
+ err = setup_virtqueues(mvdev);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
+index 36c44d9fdd166..65ebbba206621 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
+@@ -65,6 +65,15 @@ struct mlx5_vdpa_net {
+ struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
+ struct mlx5_vdpa_irq_pool irqp;
+ struct dentry *debugfs;
++
++ u32 umem_1_buffer_param_a;
++ u32 umem_1_buffer_param_b;
++
++ u32 umem_2_buffer_param_a;
++ u32 umem_2_buffer_param_b;
++
++ u32 umem_3_buffer_param_a;
++ u32 umem_3_buffer_param_b;
+ };
+
+ struct mlx5_vdpa_counter {
+--
+2.42.0
+