--- /dev/null
+From stable+bounces-73637-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:38 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:58 -0400
+Subject: nfsd: expose /proc/net/sunrpc/nfsd in net namespaces
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-17-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 93483ac5fec62cc1de166051b219d953bb5e4ef4 ]
+
+We are running nfsd servers inside containers with their own network
+namespace, and we want to monitor these services using the stats found
+in /proc. However, these are not exposed in /proc inside the container,
+so we have to bind-mount the host's /proc into our containers to get at
+this information.
+
+Separate out the stat counters init and the proc registration, and move
+the proc registration into the pernet operations entry and exit points
+so that these stats can be exposed inside network namespaces.
+
+This is an intermediate step: it just exposes the global counters in
+the network namespace. Subsequent patches will move these counters into
+the per-network namespace container.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfsctl.c | 8 +++++---
+ fs/nfsd/stats.c | 21 ++++++---------------
+ fs/nfsd/stats.h | 6 ++++--
+ 3 files changed, 15 insertions(+), 20 deletions(-)
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1466,6 +1466,7 @@ static __net_init int nfsd_init_net(stru
+ nfsd4_init_leases_net(nn);
+ get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ seqlock_init(&nn->writeverf_lock);
++ nfsd_proc_stat_init(net);
+
+ return 0;
+
+@@ -1481,6 +1482,7 @@ static __net_exit void nfsd_exit_net(str
+ {
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
++ nfsd_proc_stat_shutdown(net);
+ nfsd_net_reply_cache_destroy(nn);
+ nfsd_idmap_shutdown(net);
+ nfsd_export_shutdown(net);
+@@ -1504,7 +1506,7 @@ static int __init init_nfsd(void)
+ retval = nfsd4_init_pnfs();
+ if (retval)
+ goto out_free_slabs;
+- retval = nfsd_stat_init(); /* Statistics */
++ retval = nfsd_stat_counters_init(); /* Statistics */
+ if (retval)
+ goto out_free_pnfs;
+ retval = nfsd_drc_slab_create();
+@@ -1540,7 +1542,7 @@ out_free_lockd:
+ nfsd_lockd_shutdown();
+ nfsd_drc_slab_free();
+ out_free_stat:
+- nfsd_stat_shutdown();
++ nfsd_stat_counters_destroy();
+ out_free_pnfs:
+ nfsd4_exit_pnfs();
+ out_free_slabs:
+@@ -1557,7 +1559,7 @@ static void __exit exit_nfsd(void)
+ nfsd_drc_slab_free();
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
+- nfsd_stat_shutdown();
++ nfsd_stat_counters_destroy();
+ nfsd_lockd_shutdown();
+ nfsd4_free_slabs();
+ nfsd4_exit_pnfs();
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -106,31 +106,22 @@ void nfsd_percpu_counters_destroy(struct
+ percpu_counter_destroy(&counters[i]);
+ }
+
+-static int nfsd_stat_counters_init(void)
++int nfsd_stat_counters_init(void)
+ {
+ return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
+ }
+
+-static void nfsd_stat_counters_destroy(void)
++void nfsd_stat_counters_destroy(void)
+ {
+ nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
+ }
+
+-int nfsd_stat_init(void)
++void nfsd_proc_stat_init(struct net *net)
+ {
+- int err;
+-
+- err = nfsd_stat_counters_init();
+- if (err)
+- return err;
+-
+- svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
+-
+- return 0;
++ svc_proc_register(net, &nfsd_svcstats, &nfsd_proc_ops);
+ }
+
+-void nfsd_stat_shutdown(void)
++void nfsd_proc_stat_shutdown(struct net *net)
+ {
+- nfsd_stat_counters_destroy();
+- svc_proc_unregister(&init_net, "nfsd");
++ svc_proc_unregister(net, "nfsd");
+ }
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -39,8 +39,10 @@ extern struct svc_stat nfsd_svcstats;
+ int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
+ void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
+ void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
+-int nfsd_stat_init(void);
+-void nfsd_stat_shutdown(void);
++int nfsd_stat_counters_init(void);
++void nfsd_stat_counters_destroy(void);
++void nfsd_proc_stat_init(struct net *net);
++void nfsd_proc_stat_shutdown(struct net *net);
+
+ static inline void nfsd_stats_rc_hits_inc(void)
+ {
--- /dev/null
+From stable+bounces-73630-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:54 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:51 -0400
+Subject: NFSD: Fix frame size warning in svc_export_parse()
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Chuck Lever <chuck.lever@oracle.com>, kernel test robot <lkp@intel.com>, Amir Goldstein <amir73il@gmail.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-10-cel@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 6939ace1f22681fface7841cdbf34d3204cc94b5 ]
+
+fs/nfsd/export.c: In function 'svc_export_parse':
+fs/nfsd/export.c:737:1: warning: the frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]
+ 737 | }
+
+On my systems, svc_export_parse() has a stack frame of over 800
+bytes, not 1040, but nonetheless it could do with some reduction.
+
+When a struct svc_export is on the stack, it's a temporary structure
+used as an argument, and not visible as an actual exported FS. No
+need to reserve space for export_stats in such cases.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202310012359.YEw5IrK6-lkp@intel.com/
+Cc: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Stable-dep-of: 4b14885411f7 ("nfsd: make all of the nfsd stats per-network namespace")
+[ cel: adjusted to apply to v5.10.y ]
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/export.c | 32 +++++++++++++++++++++++---------
+ fs/nfsd/export.h | 4 ++--
+ fs/nfsd/stats.h | 12 ++++++------
+ 3 files changed, 31 insertions(+), 17 deletions(-)
+
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -339,12 +339,16 @@ static int export_stats_init(struct expo
+
+ static void export_stats_reset(struct export_stats *stats)
+ {
+- nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
++ if (stats)
++ nfsd_percpu_counters_reset(stats->counter,
++ EXP_STATS_COUNTERS_NUM);
+ }
+
+ static void export_stats_destroy(struct export_stats *stats)
+ {
+- nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
++ if (stats)
++ nfsd_percpu_counters_destroy(stats->counter,
++ EXP_STATS_COUNTERS_NUM);
+ }
+
+ static void svc_export_put(struct kref *ref)
+@@ -353,7 +357,8 @@ static void svc_export_put(struct kref *
+ path_put(&exp->ex_path);
+ auth_domain_put(exp->ex_client);
+ nfsd4_fslocs_free(&exp->ex_fslocs);
+- export_stats_destroy(&exp->ex_stats);
++ export_stats_destroy(exp->ex_stats);
++ kfree(exp->ex_stats);
+ kfree(exp->ex_uuid);
+ kfree_rcu(exp, ex_rcu);
+ }
+@@ -738,13 +743,15 @@ static int svc_export_show(struct seq_fi
+ seq_putc(m, '\t');
+ seq_escape(m, exp->ex_client->name, " \t\n\\");
+ if (export_stats) {
+- seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
++ struct percpu_counter *counter = exp->ex_stats->counter;
++
++ seq_printf(m, "\t%lld\n", exp->ex_stats->start_time);
+ seq_printf(m, "\tfh_stale: %lld\n",
+- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
++ percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE]));
+ seq_printf(m, "\tio_read: %lld\n",
+- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
++ percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ]));
+ seq_printf(m, "\tio_write: %lld\n",
+- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
++ percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE]));
+ seq_putc(m, '\n');
+ return 0;
+ }
+@@ -790,7 +797,7 @@ static void svc_export_init(struct cache
+ new->ex_layout_types = 0;
+ new->ex_uuid = NULL;
+ new->cd = item->cd;
+- export_stats_reset(&new->ex_stats);
++ export_stats_reset(new->ex_stats);
+ }
+
+ static void export_update(struct cache_head *cnew, struct cache_head *citem)
+@@ -826,7 +833,14 @@ static struct cache_head *svc_export_all
+ if (!i)
+ return NULL;
+
+- if (export_stats_init(&i->ex_stats)) {
++ i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL);
++ if (!i->ex_stats) {
++ kfree(i);
++ return NULL;
++ }
++
++ if (export_stats_init(i->ex_stats)) {
++ kfree(i->ex_stats);
+ kfree(i);
+ return NULL;
+ }
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -64,10 +64,10 @@ struct svc_export {
+ struct cache_head h;
+ struct auth_domain * ex_client;
+ int ex_flags;
++ int ex_fsid;
+ struct path ex_path;
+ kuid_t ex_anon_uid;
+ kgid_t ex_anon_gid;
+- int ex_fsid;
+ unsigned char * ex_uuid; /* 16 byte fsid */
+ struct nfsd4_fs_locations ex_fslocs;
+ uint32_t ex_nflavors;
+@@ -76,7 +76,7 @@ struct svc_export {
+ struct nfsd4_deviceid_map *ex_devid_map;
+ struct cache_detail *cd;
+ struct rcu_head ex_rcu;
+- struct export_stats ex_stats;
++ struct export_stats *ex_stats;
+ };
+
+ /* an "export key" (expkey) maps a filehandlefragement to an
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -60,22 +60,22 @@ static inline void nfsd_stats_rc_nocache
+ static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
+ {
+ percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
+- if (exp)
+- percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
++ if (exp && exp->ex_stats)
++ percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
+ }
+
+ static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
+ {
+ percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
+- if (exp)
+- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
++ if (exp && exp->ex_stats)
++ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
+ }
+
+ static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
+ {
+ percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
+- if (exp)
+- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
++ if (exp && exp->ex_stats)
++ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
+ }
+
+ static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
--- /dev/null
+From stable+bounces-73638-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:43 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:59 -0400
+Subject: nfsd: make all of the nfsd stats per-network namespace
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-18-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 4b14885411f74b2b0ce0eb2b39d0fffe54e5ca0d ]
+
+We have a global set of counters that we modify for all of the nfsd
+operations, but now that we're exposing these stats across all network
+namespaces, we need to make these stats per-network namespace as well.
+We already have some caching stats that are per-network namespace, so
+move these definitions into the same counter array and then adjust all
+the helpers and users of these stats to take the appropriate nfsd_net
+struct so that the stats are maintained for the per-network namespace
+objects.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+[ cel: adjusted to apply to v5.10.y ]
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/cache.h | 2 --
+ fs/nfsd/netns.h | 17 +++++++++++++++--
+ fs/nfsd/nfs4proc.c | 6 +++---
+ fs/nfsd/nfscache.c | 36 +++++++-----------------------------
+ fs/nfsd/nfsctl.c | 12 +++---------
+ fs/nfsd/nfsfh.c | 3 ++-
+ fs/nfsd/stats.c | 24 +++++++++++++-----------
+ fs/nfsd/stats.h | 49 +++++++++++++++++--------------------------------
+ fs/nfsd/vfs.c | 6 ++++--
+ 9 files changed, 64 insertions(+), 91 deletions(-)
+
+--- a/fs/nfsd/cache.h
++++ b/fs/nfsd/cache.h
+@@ -80,8 +80,6 @@ enum {
+
+ int nfsd_drc_slab_create(void);
+ void nfsd_drc_slab_free(void);
+-int nfsd_net_reply_cache_init(struct nfsd_net *nn);
+-void nfsd_net_reply_cache_destroy(struct nfsd_net *nn);
+ int nfsd_reply_cache_init(struct nfsd_net *);
+ void nfsd_reply_cache_shutdown(struct nfsd_net *);
+ int nfsd_cache_lookup(struct svc_rqst *);
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -10,6 +10,7 @@
+
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <linux/nfs4.h>
+ #include <linux/percpu_counter.h>
+ #include <linux/siphash.h>
+
+@@ -28,7 +29,19 @@ enum {
+ NFSD_STATS_PAYLOAD_MISSES,
+ /* amount of memory (in bytes) currently consumed by the DRC */
+ NFSD_STATS_DRC_MEM_USAGE,
+- NFSD_NET_COUNTERS_NUM
++ NFSD_STATS_RC_HITS, /* repcache hits */
++ NFSD_STATS_RC_MISSES, /* repcache misses */
++ NFSD_STATS_RC_NOCACHE, /* uncached reqs */
++ NFSD_STATS_FH_STALE, /* FH stale error */
++ NFSD_STATS_IO_READ, /* bytes returned to read requests */
++ NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
++#ifdef CONFIG_NFSD_V4
++ NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
++ NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
++#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
++ NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */
++#endif
++ NFSD_STATS_COUNTERS_NUM
+ };
+
+ /*
+@@ -168,7 +181,7 @@ struct nfsd_net {
+ atomic_t num_drc_entries;
+
+ /* Per-netns stats counters */
+- struct percpu_counter counter[NFSD_NET_COUNTERS_NUM];
++ struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
+
+ /* longest hash chain seen */
+ unsigned int longest_chain;
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2435,10 +2435,10 @@ nfsd4_proc_null(struct svc_rqst *rqstp)
+ return rpc_success;
+ }
+
+-static inline void nfsd4_increment_op_stats(u32 opnum)
++static inline void nfsd4_increment_op_stats(struct nfsd_net *nn, u32 opnum)
+ {
+ if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_NFS4_OP(opnum)]);
+ }
+
+ static const struct nfsd4_operation nfsd4_ops[];
+@@ -2713,7 +2713,7 @@ encode_op:
+ status, nfsd4_op_name(op->opnum));
+
+ nfsd4_cstate_clear_replay(cstate);
+- nfsd4_increment_op_stats(op->opnum);
++ nfsd4_increment_op_stats(nn, op->opnum);
+ }
+
+ fh_put(current_fh);
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -176,27 +176,6 @@ void nfsd_drc_slab_free(void)
+ kmem_cache_destroy(drc_slab);
+ }
+
+-/**
+- * nfsd_net_reply_cache_init - per net namespace reply cache set-up
+- * @nn: nfsd_net being initialized
+- *
+- * Returns zero on succes; otherwise a negative errno is returned.
+- */
+-int nfsd_net_reply_cache_init(struct nfsd_net *nn)
+-{
+- return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+-/**
+- * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
+- * @nn: nfsd_net being freed
+- *
+- */
+-void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
+-{
+- nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ {
+ unsigned int hashsize;
+@@ -478,7 +457,7 @@ out:
+ */
+ int nfsd_cache_lookup(struct svc_rqst *rqstp)
+ {
+- struct nfsd_net *nn;
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct svc_cacherep *rp, *found;
+ __wsum csum;
+ struct nfsd_drc_bucket *b;
+@@ -489,7 +468,7 @@ int nfsd_cache_lookup(struct svc_rqst *r
+
+ rqstp->rq_cacherep = NULL;
+ if (type == RC_NOCACHE) {
+- nfsd_stats_rc_nocache_inc();
++ nfsd_stats_rc_nocache_inc(nn);
+ goto out;
+ }
+
+@@ -499,7 +478,6 @@ int nfsd_cache_lookup(struct svc_rqst *r
+ * Since the common case is a cache miss followed by an insert,
+ * preallocate an entry.
+ */
+- nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ rp = nfsd_cacherep_alloc(rqstp, csum, nn);
+ if (!rp)
+ goto out;
+@@ -517,7 +495,7 @@ int nfsd_cache_lookup(struct svc_rqst *r
+ freed = nfsd_cacherep_dispose(&dispose);
+ trace_nfsd_drc_gc(nn, freed);
+
+- nfsd_stats_rc_misses_inc();
++ nfsd_stats_rc_misses_inc(nn);
+ atomic_inc(&nn->num_drc_entries);
+ nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
+ goto out;
+@@ -525,7 +503,7 @@ int nfsd_cache_lookup(struct svc_rqst *r
+ found_entry:
+ /* We found a matching entry which is either in progress or done. */
+ nfsd_reply_cache_free_locked(NULL, rp, nn);
+- nfsd_stats_rc_hits_inc();
++ nfsd_stats_rc_hits_inc(nn);
+ rtn = RC_DROPIT;
+ rp = found;
+
+@@ -675,11 +653,11 @@ int nfsd_reply_cache_stats_show(struct s
+ seq_printf(m, "mem usage: %lld\n",
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
+ seq_printf(m, "cache hits: %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]));
+ seq_printf(m, "cache misses: %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]));
+ seq_printf(m, "not cached: %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]));
+ seq_printf(m, "payload misses: %lld\n",
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
+ seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1458,7 +1458,7 @@ static __net_init int nfsd_init_net(stru
+ retval = nfsd_idmap_init(net);
+ if (retval)
+ goto out_idmap_error;
+- retval = nfsd_net_reply_cache_init(nn);
++ retval = nfsd_stat_counters_init(nn);
+ if (retval)
+ goto out_repcache_error;
+ nn->nfsd_versions = NULL;
+@@ -1483,7 +1483,7 @@ static __net_exit void nfsd_exit_net(str
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfsd_proc_stat_shutdown(net);
+- nfsd_net_reply_cache_destroy(nn);
++ nfsd_stat_counters_destroy(nn);
+ nfsd_idmap_shutdown(net);
+ nfsd_export_shutdown(net);
+ nfsd_netns_free_versions(nn);
+@@ -1506,12 +1506,9 @@ static int __init init_nfsd(void)
+ retval = nfsd4_init_pnfs();
+ if (retval)
+ goto out_free_slabs;
+- retval = nfsd_stat_counters_init(); /* Statistics */
+- if (retval)
+- goto out_free_pnfs;
+ retval = nfsd_drc_slab_create();
+ if (retval)
+- goto out_free_stat;
++ goto out_free_pnfs;
+ nfsd_lockd_init(); /* lockd->nfsd callbacks */
+ retval = create_proc_exports_entry();
+ if (retval)
+@@ -1541,8 +1538,6 @@ out_free_exports:
+ out_free_lockd:
+ nfsd_lockd_shutdown();
+ nfsd_drc_slab_free();
+-out_free_stat:
+- nfsd_stat_counters_destroy();
+ out_free_pnfs:
+ nfsd4_exit_pnfs();
+ out_free_slabs:
+@@ -1559,7 +1554,6 @@ static void __exit exit_nfsd(void)
+ nfsd_drc_slab_free();
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
+- nfsd_stat_counters_destroy();
+ nfsd_lockd_shutdown();
+ nfsd4_free_slabs();
+ nfsd4_exit_pnfs();
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -326,6 +326,7 @@ out:
+ __be32
+ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ {
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct svc_export *exp = NULL;
+ struct dentry *dentry;
+ __be32 error;
+@@ -399,7 +400,7 @@ skip_pseudoflavor_check:
+ }
+ out:
+ if (error == nfserr_stale)
+- nfsd_stats_fh_stale_inc(exp);
++ nfsd_stats_fh_stale_inc(nn, exp);
+ return error;
+ }
+
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -34,15 +34,17 @@ struct svc_stat nfsd_svcstats = {
+
+ static int nfsd_show(struct seq_file *seq, void *v)
+ {
++ struct net *net = PDE_DATA(file_inode(seq->file));
++ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int i;
+
+ seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]),
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE]));
+
+ /* thread usage: */
+ seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt));
+@@ -63,7 +65,7 @@ static int nfsd_show(struct seq_file *se
+ seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
+ for (i = 0; i <= LAST_NFS4_OP; i++) {
+ seq_printf(seq, " %lld",
+- percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)]));
+ }
+
+ seq_putc(seq, '\n');
+@@ -106,14 +108,14 @@ void nfsd_percpu_counters_destroy(struct
+ percpu_counter_destroy(&counters[i]);
+ }
+
+-int nfsd_stat_counters_init(void)
++int nfsd_stat_counters_init(struct nfsd_net *nn)
+ {
+- return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++ return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+
+-void nfsd_stat_counters_destroy(void)
++void nfsd_stat_counters_destroy(struct nfsd_net *nn)
+ {
+- nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++ nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+
+ void nfsd_proc_stat_init(struct net *net)
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -10,25 +10,7 @@
+ #include <uapi/linux/nfsd/stats.h>
+ #include <linux/percpu_counter.h>
+
+-
+-enum {
+- NFSD_STATS_RC_HITS, /* repcache hits */
+- NFSD_STATS_RC_MISSES, /* repcache misses */
+- NFSD_STATS_RC_NOCACHE, /* uncached reqs */
+- NFSD_STATS_FH_STALE, /* FH stale error */
+- NFSD_STATS_IO_READ, /* bytes returned to read requests */
+- NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
+-#ifdef CONFIG_NFSD_V4
+- NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
+- NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
+-#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
+-#endif
+- NFSD_STATS_COUNTERS_NUM
+-};
+-
+ struct nfsd_stats {
+- struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
+-
+ atomic_t th_cnt; /* number of available threads */
+ };
+
+@@ -39,43 +21,46 @@ extern struct svc_stat nfsd_svcstats;
+ int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
+ void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
+ void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
+-int nfsd_stat_counters_init(void);
+-void nfsd_stat_counters_destroy(void);
++int nfsd_stat_counters_init(struct nfsd_net *nn);
++void nfsd_stat_counters_destroy(struct nfsd_net *nn);
+ void nfsd_proc_stat_init(struct net *net);
+ void nfsd_proc_stat_shutdown(struct net *net);
+
+-static inline void nfsd_stats_rc_hits_inc(void)
++static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]);
+ }
+
+-static inline void nfsd_stats_rc_misses_inc(void)
++static inline void nfsd_stats_rc_misses_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_MISSES]);
+ }
+
+-static inline void nfsd_stats_rc_nocache_inc(void)
++static inline void nfsd_stats_rc_nocache_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_RC_NOCACHE]);
+ }
+
+-static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
++static inline void nfsd_stats_fh_stale_inc(struct nfsd_net *nn,
++ struct svc_export *exp)
+ {
+- percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_FH_STALE]);
+ if (exp && exp->ex_stats)
+ percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
+ }
+
+-static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_read_add(struct nfsd_net *nn,
++ struct svc_export *exp, s64 amount)
+ {
+- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
++ percpu_counter_add(&nn->counter[NFSD_STATS_IO_READ], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
+ }
+
+-static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_write_add(struct nfsd_net *nn,
++ struct svc_export *exp, s64 amount)
+ {
+- percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
++ percpu_counter_add(&nn->counter[NFSD_STATS_IO_WRITE], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
+ }
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1000,7 +1000,9 @@ static __be32 nfsd_finish_read(struct sv
+ unsigned long *count, u32 *eof, ssize_t host_err)
+ {
+ if (host_err >= 0) {
+- nfsd_stats_io_read_add(fhp->fh_export, host_err);
++ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
++
++ nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
+ *eof = nfsd_eof_on_read(file, offset, host_err, *count);
+ *count = host_err;
+ fsnotify_access(file);
+@@ -1143,7 +1145,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
+ goto out_nfserr;
+ }
+ *cnt = host_err;
+- nfsd_stats_io_write_add(exp, *cnt);
++ nfsd_stats_io_write_add(nn, exp, *cnt);
+ fsnotify_modify(file);
+ host_err = filemap_check_wb_err(file->f_mapping, since);
+ if (host_err < 0)
--- /dev/null
+From stable+bounces-73640-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:48 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:31:01 -0400
+Subject: nfsd: make svc_stat per-network namespace instead of global
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-20-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 16fb9808ab2c99979f081987752abcbc5b092eac ]
+
+The final bit of stats that is global is the rpc svc_stat. Move this
+into the nfsd_net struct and use that everywhere instead of the global
+struct. Remove the unused global struct.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/netns.h | 4 ++++
+ fs/nfsd/nfsctl.c | 2 ++
+ fs/nfsd/nfssvc.c | 2 +-
+ fs/nfsd/stats.c | 10 ++++------
+ fs/nfsd/stats.h | 2 --
+ 5 files changed, 11 insertions(+), 9 deletions(-)
+
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -13,6 +13,7 @@
+ #include <linux/nfs4.h>
+ #include <linux/percpu_counter.h>
+ #include <linux/siphash.h>
++#include <linux/sunrpc/stats.h>
+
+ /* Hash tables for nfs4_clientid state */
+ #define CLIENT_HASH_BITS 4
+@@ -183,6 +184,9 @@ struct nfsd_net {
+ /* Per-netns stats counters */
+ struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
+
++ /* sunrpc svc stats */
++ struct svc_stat nfsd_svcstats;
++
+ /* longest hash chain seen */
+ unsigned int longest_chain;
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1461,6 +1461,8 @@ static __net_init int nfsd_init_net(stru
+ retval = nfsd_stat_counters_init(nn);
+ if (retval)
+ goto out_repcache_error;
++ memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
++ nn->nfsd_svcstats.program = &nfsd_program;
+ nn->nfsd_versions = NULL;
+ nn->nfsd4_minorversions = NULL;
+ nfsd4_init_leases_net(nn);
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -664,7 +664,7 @@ int nfsd_create_serv(struct net *net)
+ if (nfsd_max_blksize == 0)
+ nfsd_max_blksize = nfsd_get_default_max_blksize();
+ nfsd_reset_versions(nn);
+- serv = svc_create_pooled(&nfsd_program, &nfsd_svcstats,
++ serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats,
+ nfsd_max_blksize, nfsd);
+ if (serv == NULL)
+ return -ENOMEM;
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -27,10 +27,6 @@
+
+ #include "nfsd.h"
+
+-struct svc_stat nfsd_svcstats = {
+- .program = &nfsd_program,
+-};
+-
+ static int nfsd_show(struct seq_file *seq, void *v)
+ {
+ struct net *net = PDE_DATA(file_inode(seq->file));
+@@ -56,7 +52,7 @@ static int nfsd_show(struct seq_file *se
+ seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
+
+ /* show my rpc info */
+- svc_seq_show(seq, &nfsd_svcstats);
++ svc_seq_show(seq, &nn->nfsd_svcstats);
+
+ #ifdef CONFIG_NFSD_V4
+ /* Show count for individual nfsv4 operations */
+@@ -119,7 +115,9 @@ void nfsd_stat_counters_destroy(struct n
+
+ void nfsd_proc_stat_init(struct net *net)
+ {
+- svc_proc_register(net, &nfsd_svcstats, &nfsd_proc_ops);
++ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++
++ svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
+ }
+
+ void nfsd_proc_stat_shutdown(struct net *net)
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -10,8 +10,6 @@
+ #include <uapi/linux/nfsd/stats.h>
+ #include <linux/percpu_counter.h>
+
+-extern struct svc_stat nfsd_svcstats;
+-
+ int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
+ void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
+ void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
--- /dev/null
+From stable+bounces-73623-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:33 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:44 -0400
+Subject: nfsd: move init of percpu reply_cache_stats counters back to nfsd_init_net
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Jeff Layton <jlayton@kernel.org>, Eirik Fuller <efuller@redhat.com>
+Message-ID: <20240905153101.59927-3-cel@kernel.org>
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit ed9ab7346e908496816cffdecd46932035f66e2e ]
+
+Commit f5f9d4a314da ("nfsd: move reply cache initialization into nfsd
+startup") moved the initialization of the reply cache into nfsd startup,
+but didn't account for the stats counters, which can be accessed before
+nfsd is ever started. The result can be a NULL pointer dereference when
+someone accesses /proc/fs/nfsd/reply_cache_stats while nfsd is still
+shut down.
+
+This is a regression and a user-triggerable oops in the right situation:
+
+- non-x86_64 arch
+- /proc/fs/nfsd is mounted in the namespace
+- nfsd is not started in the namespace
+- unprivileged user calls "cat /proc/fs/nfsd/reply_cache_stats"
+
+Although this is easy to trigger on some arches (like aarch64), on
+x86_64, calling this_cpu_ptr(NULL) evidently returns a pointer to the
+fixed_percpu_data. That struct looks just enough like a newly
+initialized percpu var to allow nfsd_reply_cache_stats_show to access
+it without Oopsing.
+
+Move the initialization of the per-net+per-cpu reply-cache counters
+back into nfsd_init_net, while leaving the rest of the reply cache
+allocations to be done at nfsd startup time.
+
+Kudos to Eirik who did most of the legwork to track this down.
+
+Cc: stable@vger.kernel.org # v6.3+
+Fixes: f5f9d4a314da ("nfsd: move reply cache initialization into nfsd startup")
+Reported-and-tested-by: Eirik Fuller <efuller@redhat.com>
+Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2215429
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Stable-dep-of: 4b14885411f7 ("nfsd: make all of the nfsd stats per-network namespace")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/cache.h | 2 ++
+ fs/nfsd/nfscache.c | 25 ++++++++++++++-----------
+ fs/nfsd/nfsctl.c | 10 +++++++++-
+ 3 files changed, 25 insertions(+), 12 deletions(-)
+
+--- a/fs/nfsd/cache.h
++++ b/fs/nfsd/cache.h
+@@ -80,6 +80,8 @@ enum {
+
+ int nfsd_drc_slab_create(void);
+ void nfsd_drc_slab_free(void);
++int nfsd_net_reply_cache_init(struct nfsd_net *nn);
++void nfsd_net_reply_cache_destroy(struct nfsd_net *nn);
+ int nfsd_reply_cache_init(struct nfsd_net *);
+ void nfsd_reply_cache_shutdown(struct nfsd_net *);
+ int nfsd_cache_lookup(struct svc_rqst *);
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -148,12 +148,23 @@ void nfsd_drc_slab_free(void)
+ kmem_cache_destroy(drc_slab);
+ }
+
+-static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
++/**
++ * nfsd_net_reply_cache_init - per net namespace reply cache set-up
++ * @nn: nfsd_net being initialized
++ *
++ * Returns zero on succes; otherwise a negative errno is returned.
++ */
++int nfsd_net_reply_cache_init(struct nfsd_net *nn)
+ {
+ return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
+ }
+
+-static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
++/**
++ * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
++ * @nn: nfsd_net being freed
++ *
++ */
++void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
+ {
+ nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
+ }
+@@ -169,16 +180,12 @@ int nfsd_reply_cache_init(struct nfsd_ne
+ hashsize = nfsd_hashsize(nn->max_drc_entries);
+ nn->maskbits = ilog2(hashsize);
+
+- status = nfsd_reply_cache_stats_init(nn);
+- if (status)
+- goto out_nomem;
+-
+ nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
+ nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
+ nn->nfsd_reply_cache_shrinker.seeks = 1;
+ status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
+ if (status)
+- goto out_stats_destroy;
++ return status;
+
+ nn->drc_hashtbl = kvzalloc(array_size(hashsize,
+ sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
+@@ -194,9 +201,6 @@ int nfsd_reply_cache_init(struct nfsd_ne
+ return 0;
+ out_shrinker:
+ unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+-out_stats_destroy:
+- nfsd_reply_cache_stats_destroy(nn);
+-out_nomem:
+ printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
+ return -ENOMEM;
+ }
+@@ -216,7 +220,6 @@ void nfsd_reply_cache_shutdown(struct nf
+ rp, nn);
+ }
+ }
+- nfsd_reply_cache_stats_destroy(nn);
+
+ kvfree(nn->drc_hashtbl);
+ nn->drc_hashtbl = NULL;
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1458,6 +1458,9 @@ static __net_init int nfsd_init_net(stru
+ retval = nfsd_idmap_init(net);
+ if (retval)
+ goto out_idmap_error;
++ retval = nfsd_net_reply_cache_init(nn);
++ if (retval)
++ goto out_repcache_error;
+ nn->nfsd_versions = NULL;
+ nn->nfsd4_minorversions = NULL;
+ nfsd4_init_leases_net(nn);
+@@ -1466,6 +1469,8 @@ static __net_init int nfsd_init_net(stru
+
+ return 0;
+
++out_repcache_error:
++ nfsd_idmap_shutdown(net);
+ out_idmap_error:
+ nfsd_export_shutdown(net);
+ out_export_error:
+@@ -1474,9 +1479,12 @@ out_export_error:
+
+ static __net_exit void nfsd_exit_net(struct net *net)
+ {
++ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++
++ nfsd_net_reply_cache_destroy(nn);
+ nfsd_idmap_shutdown(net);
+ nfsd_export_shutdown(net);
+- nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
++ nfsd_netns_free_versions(nn);
+ }
+
+ static struct pernet_operations nfsd_net_ops = {
--- /dev/null
+From stable+bounces-73622-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:30 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:43 -0400
+Subject: nfsd: move reply cache initialization into nfsd startup
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Jeff Layton <jlayton@kernel.org>, Dai Ngo <dai.ngo@oracle.com>
+Message-ID: <20240905153101.59927-2-cel@kernel.org>
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit f5f9d4a314da88c0a5faa6d168bf69081b7a25ae ]
+
+There's no need to start the reply cache before nfsd is up and running,
+and doing so means that we register a shrinker for every net namespace
+instead of just the ones where nfsd is running.
+
+Move it to the per-net nfsd startup instead.
+
+Reported-by: Dai Ngo <dai.ngo@oracle.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Stable-dep-of: ed9ab7346e90 ("nfsd: move init of percpu reply_cache_stats counters back to nfsd_init_net")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfsctl.c | 8 --------
+ fs/nfsd/nfssvc.c | 10 +++++++++-
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1461,16 +1461,11 @@ static __net_init int nfsd_init_net(stru
+ nn->nfsd_versions = NULL;
+ nn->nfsd4_minorversions = NULL;
+ nfsd4_init_leases_net(nn);
+- retval = nfsd_reply_cache_init(nn);
+- if (retval)
+- goto out_cache_error;
+ get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ seqlock_init(&nn->writeverf_lock);
+
+ return 0;
+
+-out_cache_error:
+- nfsd_idmap_shutdown(net);
+ out_idmap_error:
+ nfsd_export_shutdown(net);
+ out_export_error:
+@@ -1479,9 +1474,6 @@ out_export_error:
+
+ static __net_exit void nfsd_exit_net(struct net *net)
+ {
+- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-
+- nfsd_reply_cache_shutdown(nn);
+ nfsd_idmap_shutdown(net);
+ nfsd_export_shutdown(net);
+ nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -427,16 +427,23 @@ static int nfsd_startup_net(struct net *
+ ret = nfsd_file_cache_start_net(net);
+ if (ret)
+ goto out_lockd;
+- ret = nfs4_state_start_net(net);
++
++ ret = nfsd_reply_cache_init(nn);
+ if (ret)
+ goto out_filecache;
+
++ ret = nfs4_state_start_net(net);
++ if (ret)
++ goto out_reply_cache;
++
+ #ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ nfsd4_ssc_init_umount_work(nn);
+ #endif
+ nn->nfsd_net_up = true;
+ return 0;
+
++out_reply_cache:
++ nfsd_reply_cache_shutdown(nn);
+ out_filecache:
+ nfsd_file_cache_shutdown_net(net);
+ out_lockd:
+@@ -454,6 +461,7 @@ static void nfsd_shutdown_net(struct net
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs4_state_shutdown_net(net);
++ nfsd_reply_cache_shutdown(nn);
+ nfsd_file_cache_shutdown_net(net);
+ if (nn->lockd_up) {
+ lockd_down(net);
--- /dev/null
+From stable+bounces-73624-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:37 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:45 -0400
+Subject: NFSD: Refactor nfsd_reply_cache_free_locked()
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Chuck Lever <chuck.lever@oracle.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-4-cel@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 35308e7f0fc3942edc87d9c6dc78c4a096428957 ]
+
+To reduce contention on the bucket locks, we must avoid calling
+kfree() while each bucket lock is held.
+
+Start by refactoring nfsd_reply_cache_free_locked() into a helper
+that removes an entry from the bucket (and must therefore run under
+the lock) and a second helper that frees the entry (which does not
+need to hold the lock).
+
+For readability, rename the helpers nfsd_cacherep_<verb>.
+
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Stable-dep-of: a9507f6af145 ("NFSD: Replace nfsd_prune_bucket()")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfscache.c | 27 ++++++++++++++++++++-------
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -110,21 +110,33 @@ nfsd_reply_cache_alloc(struct svc_rqst *
+ return rp;
+ }
+
++static void nfsd_cacherep_free(struct svc_cacherep *rp)
++{
++ if (rp->c_type == RC_REPLBUFF)
++ kfree(rp->c_replvec.iov_base);
++ kmem_cache_free(drc_slab, rp);
++}
++
+ static void
+-nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+- struct nfsd_net *nn)
++nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
++ struct svc_cacherep *rp)
+ {
+- if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
++ if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
+ nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
+- kfree(rp->c_replvec.iov_base);
+- }
+ if (rp->c_state != RC_UNUSED) {
+ rb_erase(&rp->c_node, &b->rb_head);
+ list_del(&rp->c_lru);
+ atomic_dec(&nn->num_drc_entries);
+ nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
+ }
+- kmem_cache_free(drc_slab, rp);
++}
++
++static void
++nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
++ struct nfsd_net *nn)
++{
++ nfsd_cacherep_unlink_locked(nn, b, rp);
++ nfsd_cacherep_free(rp);
+ }
+
+ static void
+@@ -132,8 +144,9 @@ nfsd_reply_cache_free(struct nfsd_drc_bu
+ struct nfsd_net *nn)
+ {
+ spin_lock(&b->cache_lock);
+- nfsd_reply_cache_free_locked(b, rp, nn);
++ nfsd_cacherep_unlink_locked(nn, b, rp);
+ spin_unlock(&b->cache_lock);
++ nfsd_cacherep_free(rp);
+ }
+
+ int nfsd_drc_slab_create(void)
--- /dev/null
+From stable+bounces-73627-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:45 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:48 -0400
+Subject: NFSD: Refactor the duplicate reply cache shrinker
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Chuck Lever <chuck.lever@oracle.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-7-cel@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit c135e1269f34dfdea4bd94c11060c83a3c0b3c12 ]
+
+Avoid holding the bucket lock while freeing cache entries. This
+change also caps the number of entries that are freed when the
+shrinker runs, to reduce the shrinker's impact on the cache's
+effectiveness.
+
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfscache.c | 82 +++++++++++++++++++++++++----------------------------
+ 1 file changed, 39 insertions(+), 43 deletions(-)
+
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -309,68 +309,64 @@ nfsd_prune_bucket_locked(struct nfsd_net
+ }
+ }
+
+-static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
+- unsigned int max)
++/**
++ * nfsd_reply_cache_count - count_objects method for the DRC shrinker
++ * @shrink: our registered shrinker context
++ * @sc: garbage collection parameters
++ *
++ * Returns the total number of entries in the duplicate reply cache. To
++ * keep things simple and quick, this is not the number of expired entries
++ * in the cache (ie, the number that would be removed by a call to
++ * nfsd_reply_cache_scan).
++ */
++static unsigned long
++nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+ {
+- struct svc_cacherep *rp, *tmp;
+- long freed = 0;
++ struct nfsd_net *nn = container_of(shrink,
++ struct nfsd_net, nfsd_reply_cache_shrinker);
+
+- list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
+- /*
+- * Don't free entries attached to calls that are still
+- * in-progress, but do keep scanning the list.
+- */
+- if (rp->c_state == RC_INPROG)
+- continue;
+- if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
+- time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
+- break;
+- nfsd_reply_cache_free_locked(b, rp, nn);
+- if (max && freed++ > max)
+- break;
+- }
+- return freed;
++ return atomic_read(&nn->num_drc_entries);
+ }
+
+-/*
+- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+- * Also prune the oldest ones when the total exceeds the max number of entries.
++/**
++ * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
++ * @shrink: our registered shrinker context
++ * @sc: garbage collection parameters
++ *
++ * Free expired entries on each bucket's LRU list until we've released
++ * nr_to_scan freed objects. Nothing will be released if the cache
++ * has not exceeded it's max_drc_entries limit.
++ *
++ * Returns the number of entries released by this call.
+ */
+-static long
+-prune_cache_entries(struct nfsd_net *nn)
++static unsigned long
++nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
++ struct nfsd_net *nn = container_of(shrink,
++ struct nfsd_net, nfsd_reply_cache_shrinker);
++ unsigned long freed = 0;
++ LIST_HEAD(dispose);
+ unsigned int i;
+- long freed = 0;
+
+ for (i = 0; i < nn->drc_hashsize; i++) {
+ struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
+
+ if (list_empty(&b->lru_head))
+ continue;
++
+ spin_lock(&b->cache_lock);
+- freed += prune_bucket(b, nn, 0);
++ nfsd_prune_bucket_locked(nn, b, 0, &dispose);
+ spin_unlock(&b->cache_lock);
+- }
+- return freed;
+-}
+
+-static unsigned long
+-nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+-{
+- struct nfsd_net *nn = container_of(shrink,
+- struct nfsd_net, nfsd_reply_cache_shrinker);
++ freed += nfsd_cacherep_dispose(&dispose);
++ if (freed > sc->nr_to_scan)
++ break;
++ }
+
+- return atomic_read(&nn->num_drc_entries);
++ trace_nfsd_drc_gc(nn, freed);
++ return freed;
+ }
+
+-static unsigned long
+-nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+-{
+- struct nfsd_net *nn = container_of(shrink,
+- struct nfsd_net, nfsd_reply_cache_shrinker);
+-
+- return prune_cache_entries(nn);
+-}
+ /*
+ * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
+ */
--- /dev/null
+From stable+bounces-73639-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:45 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:31:00 -0400
+Subject: nfsd: remove nfsd_stats, make th_cnt a global counter
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-19-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit e41ee44cc6a473b1f414031782c3b4283d7f3e5f ]
+
+This is the last global stat; take it out of the nfsd_stats struct,
+make it a global counter in nfsd, and report it the same as always.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfsd.h | 1 +
+ fs/nfsd/nfssvc.c | 5 +++--
+ fs/nfsd/stats.c | 3 +--
+ fs/nfsd/stats.h | 6 ------
+ 4 files changed, 5 insertions(+), 10 deletions(-)
+
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -69,6 +69,7 @@ extern struct mutex nfsd_mutex;
+ extern spinlock_t nfsd_drc_lock;
+ extern unsigned long nfsd_drc_max_mem;
+ extern unsigned long nfsd_drc_mem_used;
++extern atomic_t nfsd_th_cnt; /* number of available threads */
+
+ extern const struct seq_operations nfs_exports_op;
+
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -34,6 +34,7 @@
+
+ #define NFSDDBG_FACILITY NFSDDBG_SVC
+
++atomic_t nfsd_th_cnt = ATOMIC_INIT(0);
+ extern struct svc_program nfsd_program;
+ static int nfsd(void *vrqstp);
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+@@ -935,7 +936,7 @@ nfsd(void *vrqstp)
+
+ current->fs->umask = 0;
+
+- atomic_inc(&nfsdstats.th_cnt);
++ atomic_inc(&nfsd_th_cnt);
+
+ set_freezable();
+
+@@ -959,7 +960,7 @@ nfsd(void *vrqstp)
+ validate_process_creds();
+ }
+
+- atomic_dec(&nfsdstats.th_cnt);
++ atomic_dec(&nfsd_th_cnt);
+
+ out:
+ /* Release the thread */
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -27,7 +27,6 @@
+
+ #include "nfsd.h"
+
+-struct nfsd_stats nfsdstats;
+ struct svc_stat nfsd_svcstats = {
+ .program = &nfsd_program,
+ };
+@@ -47,7 +46,7 @@ static int nfsd_show(struct seq_file *se
+ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE]));
+
+ /* thread usage: */
+- seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt));
++ seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt));
+
+ /* deprecated thread usage histogram stats */
+ for (i = 0; i < 10; i++)
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -10,12 +10,6 @@
+ #include <uapi/linux/nfsd/stats.h>
+ #include <linux/percpu_counter.h>
+
+-struct nfsd_stats {
+- atomic_t th_cnt; /* number of available threads */
+-};
+-
+-extern struct nfsd_stats nfsdstats;
+-
+ extern struct svc_stat nfsd_svcstats;
+
+ int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
--- /dev/null
+From stable+bounces-73635-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:29 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:57 -0400
+Subject: nfsd: rename NFSD_NET_* to NFSD_STATS_*
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-16-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit d98416cc2154053950610bb6880911e3dcbdf8c5 ]
+
+We're going to merge all of the stats into the per-network namespace
+struct in subsequent patches, so rename these nn counters to be
+consistent with the rest of the stats.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/netns.h | 4 ++--
+ fs/nfsd/nfscache.c | 4 ++--
+ fs/nfsd/stats.h | 6 +++---
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -25,9 +25,9 @@ struct nfsd4_client_tracking_ops;
+
+ enum {
+ /* cache misses due only to checksum comparison failures */
+- NFSD_NET_PAYLOAD_MISSES,
++ NFSD_STATS_PAYLOAD_MISSES,
+ /* amount of memory (in bytes) currently consumed by the DRC */
+- NFSD_NET_DRC_MEM_USAGE,
++ NFSD_STATS_DRC_MEM_USAGE,
+ NFSD_NET_COUNTERS_NUM
+ };
+
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -673,7 +673,7 @@ int nfsd_reply_cache_stats_show(struct s
+ atomic_read(&nn->num_drc_entries));
+ seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
+ seq_printf(m, "mem usage: %lld\n",
+- percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
+ seq_printf(m, "cache hits: %lld\n",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
+ seq_printf(m, "cache misses: %lld\n",
+@@ -681,7 +681,7 @@ int nfsd_reply_cache_stats_show(struct s
+ seq_printf(m, "not cached: %lld\n",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
+ seq_printf(m, "payload misses: %lld\n",
+- percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
++ percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
+ seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
+ seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
+ return 0;
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -80,17 +80,17 @@ static inline void nfsd_stats_io_write_a
+
+ static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
+ {
+- percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
++ percpu_counter_inc(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]);
+ }
+
+ static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount)
+ {
+- percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++ percpu_counter_add(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+
+ static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
+ {
+- percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++ percpu_counter_sub(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+
+ #endif /* _NFSD_STATS_H */
--- /dev/null
+From stable+bounces-73625-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:41 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:46 -0400
+Subject: NFSD: Rename nfsd_reply_cache_alloc()
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Chuck Lever <chuck.lever@oracle.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-5-cel@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit ff0d169329768c1102b7b07eebe5a9839aa1c143 ]
+
+For readability, rename to match the other helpers.
+
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Stable-dep-of: 4b14885411f7 ("nfsd: make all of the nfsd stats per-network namespace")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfscache.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -85,8 +85,8 @@ nfsd_hashsize(unsigned int limit)
+ }
+
+ static struct svc_cacherep *
+-nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
+- struct nfsd_net *nn)
++nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
++ struct nfsd_net *nn)
+ {
+ struct svc_cacherep *rp;
+
+@@ -457,7 +457,7 @@ int nfsd_cache_lookup(struct svc_rqst *r
+ * preallocate an entry.
+ */
+ nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+- rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
++ rp = nfsd_cacherep_alloc(rqstp, csum, nn);
+ if (!rp)
+ goto out;
+
--- /dev/null
+From stable+bounces-73626-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:43 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:47 -0400
+Subject: NFSD: Replace nfsd_prune_bucket()
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Chuck Lever <chuck.lever@oracle.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-6-cel@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit a9507f6af1450ed26a4a36d979af518f5bb21e5d ]
+
+Enable nfsd_prune_bucket() to drop the bucket lock while calling
+kfree(). Use the same pattern that Jeff recently introduced in the
+NFSD filecache.
+
+A few percpu operations are moved outside the lock since they
+temporarily disable local IRQs, which is expensive and does not
+need to be done while the lock is held.
+
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Stable-dep-of: c135e1269f34 ("NFSD: Refactor the duplicate reply cache shrinker")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfscache.c | 78 ++++++++++++++++++++++++++++++++++++++++++-----------
+ fs/nfsd/trace.h | 22 ++++++++++++++
+ 2 files changed, 85 insertions(+), 15 deletions(-)
+
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -117,6 +117,21 @@ static void nfsd_cacherep_free(struct sv
+ kmem_cache_free(drc_slab, rp);
+ }
+
++static unsigned long
++nfsd_cacherep_dispose(struct list_head *dispose)
++{
++ struct svc_cacherep *rp;
++ unsigned long freed = 0;
++
++ while (!list_empty(dispose)) {
++ rp = list_first_entry(dispose, struct svc_cacherep, c_lru);
++ list_del(&rp->c_lru);
++ nfsd_cacherep_free(rp);
++ freed++;
++ }
++ return freed;
++}
++
+ static void
+ nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
+ struct svc_cacherep *rp)
+@@ -259,6 +274,41 @@ nfsd_cache_bucket_find(__be32 xid, struc
+ return &nn->drc_hashtbl[hash];
+ }
+
++/*
++ * Remove and return no more than @max expired entries in bucket @b.
++ * If @max is zero, do not limit the number of removed entries.
++ */
++static void
++nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
++ unsigned int max, struct list_head *dispose)
++{
++ unsigned long expiry = jiffies - RC_EXPIRE;
++ struct svc_cacherep *rp, *tmp;
++ unsigned int freed = 0;
++
++ lockdep_assert_held(&b->cache_lock);
++
++ /* The bucket LRU is ordered oldest-first. */
++ list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
++ /*
++ * Don't free entries attached to calls that are still
++ * in-progress, but do keep scanning the list.
++ */
++ if (rp->c_state == RC_INPROG)
++ continue;
++
++ if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
++ time_before(expiry, rp->c_timestamp))
++ break;
++
++ nfsd_cacherep_unlink_locked(nn, b, rp);
++ list_add(&rp->c_lru, dispose);
++
++ if (max && ++freed > max)
++ break;
++ }
++}
++
+ static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
+ unsigned int max)
+ {
+@@ -282,11 +332,6 @@ static long prune_bucket(struct nfsd_drc
+ return freed;
+ }
+
+-static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
+-{
+- return prune_bucket(b, nn, 3);
+-}
+-
+ /*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+@@ -442,6 +487,8 @@ int nfsd_cache_lookup(struct svc_rqst *r
+ __wsum csum;
+ struct nfsd_drc_bucket *b;
+ int type = rqstp->rq_cachetype;
++ unsigned long freed;
++ LIST_HEAD(dispose);
+ int rtn = RC_DOIT;
+
+ rqstp->rq_cacherep = NULL;
+@@ -466,20 +513,18 @@ int nfsd_cache_lookup(struct svc_rqst *r
+ found = nfsd_cache_insert(b, rp, nn);
+ if (found != rp)
+ goto found_entry;
+-
+- nfsd_stats_rc_misses_inc();
+ rqstp->rq_cacherep = rp;
+ rp->c_state = RC_INPROG;
++ nfsd_prune_bucket_locked(nn, b, 3, &dispose);
++ spin_unlock(&b->cache_lock);
+
++ freed = nfsd_cacherep_dispose(&dispose);
++ trace_nfsd_drc_gc(nn, freed);
++
++ nfsd_stats_rc_misses_inc();
+ atomic_inc(&nn->num_drc_entries);
+ nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
+-
+- nfsd_prune_bucket(b, nn);
+-
+-out_unlock:
+- spin_unlock(&b->cache_lock);
+-out:
+- return rtn;
++ goto out;
+
+ found_entry:
+ /* We found a matching entry which is either in progress or done. */
+@@ -517,7 +562,10 @@ found_entry:
+
+ out_trace:
+ trace_nfsd_drc_found(nn, rqstp, rtn);
+- goto out_unlock;
++out_unlock:
++ spin_unlock(&b->cache_lock);
++out:
++ return rtn;
+ }
+
+ /**
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -1171,6 +1171,28 @@ TRACE_EVENT(nfsd_drc_mismatch,
+ __entry->ingress)
+ );
+
++TRACE_EVENT_CONDITION(nfsd_drc_gc,
++ TP_PROTO(
++ const struct nfsd_net *nn,
++ unsigned long freed
++ ),
++ TP_ARGS(nn, freed),
++ TP_CONDITION(freed > 0),
++ TP_STRUCT__entry(
++ __field(unsigned long long, boot_time)
++ __field(unsigned long, freed)
++ __field(int, total)
++ ),
++ TP_fast_assign(
++ __entry->boot_time = nn->boot_time;
++ __entry->freed = freed;
++ __entry->total = atomic_read(&nn->num_drc_entries);
++ ),
++ TP_printk("boot_time=%16llx total=%d freed=%lu",
++ __entry->boot_time, __entry->total, __entry->freed
++ )
++);
++
+ TRACE_EVENT(nfsd_cb_args,
+ TP_PROTO(
+ const struct nfs4_client *clp,
--- /dev/null
+From stable+bounces-73628-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:50 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:50 -0400
+Subject: NFSD: Rewrite synopsis of nfsd_percpu_counters_init()
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Chuck Lever <chuck.lever@oracle.com>, Amir Goldstein <amir73il@gmail.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-9-cel@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 5ec39944f874e1ecc09f624a70dfaa8ac3bf9d08 ]
+
+In function ‘export_stats_init’,
+ inlined from ‘svc_export_alloc’ at fs/nfsd/export.c:866:6:
+fs/nfsd/export.c:337:16: warning: ‘nfsd_percpu_counters_init’ accessing 40 bytes in a region of size 0 [-Wstringop-overflow=]
+ 337 | return nfsd_percpu_counters_init(&stats->counter, EXP_STATS_COUNTERS_NUM);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+fs/nfsd/export.c:337:16: note: referencing argument 1 of type ‘struct percpu_counter[0]’
+fs/nfsd/stats.h: In function ‘svc_export_alloc’:
+fs/nfsd/stats.h:40:5: note: in a call to function ‘nfsd_percpu_counters_init’
+ 40 | int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~
+
+Cc: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Stable-dep-of: 93483ac5fec6 ("nfsd: expose /proc/net/sunrpc/nfsd in net namespaces")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/stats.c | 2 +-
+ fs/nfsd/stats.h | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -74,7 +74,7 @@ static int nfsd_show(struct seq_file *se
+
+ DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
+
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
+ {
+ int i, err = 0;
+
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -36,9 +36,9 @@ extern struct nfsd_stats nfsdstats;
+
+ extern struct svc_stat nfsd_svcstats;
+
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
+ int nfsd_stat_init(void);
+ void nfsd_stat_shutdown(void);
+
--- /dev/null
+From stable+bounces-73629-greg=kroah.com@vger.kernel.org Thu Sep 5 17:31:52 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:49 -0400
+Subject: NFSD: simplify error paths in nfsd_svc()
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, NeilBrown <neilb@suse.de>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-8-cel@kernel.org>
+
+From: NeilBrown <neilb@suse.de>
+
+[ Upstream commit bf32075256e9dd9c6b736859e2c5813981339908 ]
+
+The error paths in nfsd_svc() are needlessly complex and can result in a
+final call to svc_put() without nfsd_last_thread() being called. This
+results in the listening sockets not being closed properly.
+
+The per-netns setup provided by nfsd_startup_net() and removed by
+nfsd_shutdown_net() is needed precisely when there are running threads.
+So we don't need nfsd_up_before. We don't need to know if it *was* up.
+We only need to know if any threads are left. If none are, then we must
+call nfsd_shutdown_net(). But we don't need to do that explicitly as
+nfsd_last_thread() does that for us.
+
+So simply call nfsd_last_thread() before the last svc_put() if there are
+no running threads. That will always do the right thing.
+
+Also discard:
+ pr_info("nfsd: last server has exited, flushing export cache\n");
+It may not be true if an attempt to start the first server failed, and
+it isn't particularly helpful since it simply reports normal behaviour.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfssvc.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -567,7 +567,6 @@ void nfsd_last_thread(struct net *net)
+ return;
+
+ nfsd_shutdown_net(net);
+- pr_info("nfsd: last server has exited, flushing export cache\n");
+ nfsd_export_flush(net);
+ }
+
+@@ -782,7 +781,6 @@ int
+ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ {
+ int error;
+- bool nfsd_up_before;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
+
+@@ -802,8 +800,6 @@ nfsd_svc(int nrservs, struct net *net, c
+ error = nfsd_create_serv(net);
+ if (error)
+ goto out;
+-
+- nfsd_up_before = nn->nfsd_net_up;
+ serv = nn->nfsd_serv;
+
+ error = nfsd_startup_net(net, cred);
+@@ -811,17 +807,15 @@ nfsd_svc(int nrservs, struct net *net, c
+ goto out_put;
+ error = svc_set_num_threads(serv, NULL, nrservs);
+ if (error)
+- goto out_shutdown;
++ goto out_put;
+ error = serv->sv_nrthreads;
+- if (error == 0)
+- nfsd_last_thread(net);
+-out_shutdown:
+- if (error < 0 && !nfsd_up_before)
+- nfsd_shutdown_net(net);
+ out_put:
+ /* Threads now hold service active */
+ if (xchg(&nn->keep_active, 0))
+ svc_put(serv);
++
++ if (serv->sv_nrthreads == 0)
++ nfsd_last_thread(net);
+ svc_put(serv);
+ out:
+ mutex_unlock(&nfsd_mutex);
--- /dev/null
+From stable+bounces-73632-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:08 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:53 -0400
+Subject: nfsd: stop setting ->pg_stats for unused stats
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-12-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit a2214ed588fb3c5b9824a21cff870482510372bb ]
+
+A lot of places are setting a blank svc_stats in ->pg_stats and never
+utilizing these stats. Remove all of these extra structs as we're not
+reporting these stats anywhere.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/lockd/svc.c | 3 ---
+ fs/nfs/callback.c | 3 ---
+ fs/nfsd/nfssvc.c | 5 -----
+ 3 files changed, 11 deletions(-)
+
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -759,8 +759,6 @@ static const struct svc_version *nlmsvc_
+ #endif
+ };
+
+-static struct svc_stat nlmsvc_stats;
+-
+ #define NLM_NRVERS ARRAY_SIZE(nlmsvc_version)
+ static struct svc_program nlmsvc_program = {
+ .pg_prog = NLM_PROGRAM, /* program number */
+@@ -768,7 +766,6 @@ static struct svc_program nlmsvc_program
+ .pg_vers = nlmsvc_version, /* version table */
+ .pg_name = "lockd", /* service name */
+ .pg_class = "nfsd", /* share authentication with nfsd */
+- .pg_stats = &nlmsvc_stats, /* stats table */
+ .pg_authenticate = &lockd_authenticate, /* export authentication */
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -407,15 +407,12 @@ static const struct svc_version *nfs4_ca
+ [4] = &nfs4_callback_version4,
+ };
+
+-static struct svc_stat nfs4_callback_stats;
+-
+ static struct svc_program nfs4_callback_program = {
+ .pg_prog = NFS4_CALLBACK, /* RPC service number */
+ .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */
+ .pg_vers = nfs4_callback_version, /* version table */
+ .pg_name = "NFSv4 callback", /* service name */
+ .pg_class = "nfs", /* authentication class */
+- .pg_stats = &nfs4_callback_stats,
+ .pg_authenticate = nfs_callback_authenticate,
+ .pg_init_request = svc_generic_init_request,
+ .pg_rpcbind_set = svc_generic_rpcbind_set,
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -89,7 +89,6 @@ unsigned long nfsd_drc_max_mem;
+ unsigned long nfsd_drc_mem_used;
+
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+-static struct svc_stat nfsd_acl_svcstats;
+ static const struct svc_version *nfsd_acl_version[] = {
+ # if defined(CONFIG_NFSD_V2_ACL)
+ [2] = &nfsd_acl_version2,
+@@ -108,15 +107,11 @@ static struct svc_program nfsd_acl_progr
+ .pg_vers = nfsd_acl_version,
+ .pg_name = "nfsacl",
+ .pg_class = "nfsd",
+- .pg_stats = &nfsd_acl_svcstats,
+ .pg_authenticate = &svc_set_client,
+ .pg_init_request = nfsd_acl_init_request,
+ .pg_rpcbind_set = nfsd_acl_rpcbind_set,
+ };
+
+-static struct svc_stat nfsd_acl_svcstats = {
+- .program = &nfsd_acl_program,
+-};
+ #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+
+ static const struct svc_version *nfsd_version[] = {
btrfs-fix-use-after-free-after-failure-to-create-a-snapshot.patch
mptcp-pr_debug-add-missing-n-at-the-end.patch
mptcp-pm-avoid-possible-uaf-when-selecting-endp.patch
+nfsd-move-reply-cache-initialization-into-nfsd-startup.patch
+nfsd-move-init-of-percpu-reply_cache_stats-counters-back-to-nfsd_init_net.patch
+nfsd-refactor-nfsd_reply_cache_free_locked.patch
+nfsd-rename-nfsd_reply_cache_alloc.patch
+nfsd-replace-nfsd_prune_bucket.patch
+nfsd-refactor-the-duplicate-reply-cache-shrinker.patch
+nfsd-simplify-error-paths-in-nfsd_svc.patch
+nfsd-rewrite-synopsis-of-nfsd_percpu_counters_init.patch
+nfsd-fix-frame-size-warning-in-svc_export_parse.patch
+sunrpc-don-t-change-sv_stats-if-it-doesn-t-exist.patch
+nfsd-stop-setting-pg_stats-for-unused-stats.patch
+sunrpc-pass-in-the-sv_stats-struct-through-svc_create_pooled.patch
+sunrpc-remove-pg_stats-from-svc_program.patch
+sunrpc-use-the-struct-net-as-the-svc-proc-private.patch
+nfsd-rename-nfsd_net_-to-nfsd_stats_.patch
+nfsd-expose-proc-net-sunrpc-nfsd-in-net-namespaces.patch
+nfsd-make-all-of-the-nfsd-stats-per-network-namespace.patch
+nfsd-remove-nfsd_stats-make-th_cnt-a-global-counter.patch
+nfsd-make-svc_stat-per-network-namespace-instead-of-global.patch
--- /dev/null
+From stable+bounces-73631-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:02 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:52 -0400
+Subject: sunrpc: don't change ->sv_stats if it doesn't exist
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-11-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit ab42f4d9a26f1723dcfd6c93fcf768032b2bb5e7 ]
+
+We check for the existence of ->sv_stats everywhere except in the core
+processing code. It appears that only nfsd actually exports these values
+anywhere; everybody else just has a write-only copy of sv_stats in their
+svc_program. Add a check for ->sv_stats before every adjustment to
+allow us to eliminate the stats struct from all the users who don't
+report the stats.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+[ cel: adjusted to apply to v5.10.y ]
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/svc.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1355,7 +1355,8 @@ svc_process_common(struct svc_rqst *rqst
+ goto err_bad_proc;
+
+ /* Syntactic check complete */
+- serv->sv_stats->rpccnt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpccnt++;
+ trace_svc_process(rqstp, progp->pg_name);
+
+ /* Build the reply header. */
+@@ -1421,7 +1422,8 @@ err_short_len:
+ goto close_xprt;
+
+ err_bad_rpc:
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ svc_putnl(resv, 1); /* REJECT */
+ svc_putnl(resv, 0); /* RPC_MISMATCH */
+ svc_putnl(resv, 2); /* Only RPCv2 supported */
+@@ -1434,7 +1436,8 @@ err_release_bad_auth:
+ err_bad_auth:
+ dprintk("svc: authentication failed (%d)\n",
+ be32_to_cpu(rqstp->rq_auth_stat));
+- serv->sv_stats->rpcbadauth++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadauth++;
+ /* Restore write pointer to location of accept status: */
+ xdr_ressize_check(rqstp, reply_statp);
+ svc_putnl(resv, 1); /* REJECT */
+@@ -1444,7 +1447,8 @@ err_bad_auth:
+
+ err_bad_prog:
+ dprintk("svc: unknown program %d\n", prog);
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ svc_putnl(resv, RPC_PROG_UNAVAIL);
+ goto sendit;
+
+@@ -1452,7 +1456,8 @@ err_bad_vers:
+ svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
+ rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
+
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ svc_putnl(resv, RPC_PROG_MISMATCH);
+ svc_putnl(resv, process.mismatch.lovers);
+ svc_putnl(resv, process.mismatch.hivers);
+@@ -1461,7 +1466,8 @@ err_bad_vers:
+ err_bad_proc:
+ svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
+
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ svc_putnl(resv, RPC_PROC_UNAVAIL);
+ goto sendit;
+
+@@ -1470,7 +1476,8 @@ err_garbage:
+
+ rpc_stat = rpc_garbage_args;
+ err_bad:
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ svc_putnl(resv, ntohl(rpc_stat));
+ goto sendit;
+ }
+@@ -1505,7 +1512,8 @@ svc_process(struct svc_rqst *rqstp)
+ if (dir != 0) {
+ /* direction != CALL */
+ svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
+- serv->sv_stats->rpcbadfmt++;
++ if (serv->sv_stats)
++ serv->sv_stats->rpcbadfmt++;
+ goto out_drop;
+ }
+
--- /dev/null
+From stable+bounces-73633-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:11 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:54 -0400
+Subject: sunrpc: pass in the sv_stats struct through svc_create_pooled
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-13-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit f094323867668d50124886ad884b665de7319537 ]
+
+Since only one service actually reports the rpc stats, there's not much
+of a reason to have a pointer to it in the svc_program struct. Adjust
+the svc_create_pooled function to take the sv_stats struct as an argument
+and pass it through from there instead of getting it from
+svc_program->pg_stats.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+[ cel: adjusted to apply to v5.10.y ]
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfssvc.c | 3 ++-
+ include/linux/sunrpc/svc.h | 4 +++-
+ net/sunrpc/svc.c | 12 +++++++-----
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -664,7 +664,8 @@ int nfsd_create_serv(struct net *net)
+ if (nfsd_max_blksize == 0)
+ nfsd_max_blksize = nfsd_get_default_max_blksize();
+ nfsd_reset_versions(nn);
+- serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd);
++ serv = svc_create_pooled(&nfsd_program, &nfsd_svcstats,
++ nfsd_max_blksize, nfsd);
+ if (serv == NULL)
+ return -ENOMEM;
+
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -484,7 +484,9 @@ void svc_rqst_replace_page(struct sv
+ struct page *page);
+ void svc_rqst_free(struct svc_rqst *);
+ void svc_exit_thread(struct svc_rqst *);
+-struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
++struct svc_serv * svc_create_pooled(struct svc_program *prog,
++ struct svc_stat *stats,
++ unsigned int bufsize,
+ int (*threadfn)(void *data));
+ int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+ int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -445,8 +445,8 @@ __svc_init_bc(struct svc_serv *serv)
+ * Create an RPC service
+ */
+ static struct svc_serv *
+-__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+- int (*threadfn)(void *data))
++__svc_create(struct svc_program *prog, struct svc_stat *stats,
++ unsigned int bufsize, int npools, int (*threadfn)(void *data))
+ {
+ struct svc_serv *serv;
+ unsigned int vers;
+@@ -458,7 +458,7 @@ __svc_create(struct svc_program *prog, u
+ serv->sv_name = prog->pg_name;
+ serv->sv_program = prog;
+ kref_init(&serv->sv_refcnt);
+- serv->sv_stats = prog->pg_stats;
++ serv->sv_stats = stats;
+ if (bufsize > RPCSVC_MAXPAYLOAD)
+ bufsize = RPCSVC_MAXPAYLOAD;
+ serv->sv_max_payload = bufsize? bufsize : 4096;
+@@ -520,26 +520,28 @@ __svc_create(struct svc_program *prog, u
+ struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
+ int (*threadfn)(void *data))
+ {
+- return __svc_create(prog, bufsize, 1, threadfn);
++ return __svc_create(prog, NULL, bufsize, 1, threadfn);
+ }
+ EXPORT_SYMBOL_GPL(svc_create);
+
+ /**
+ * svc_create_pooled - Create an RPC service with pooled threads
+ * @prog: the RPC program the new service will handle
++ * @stats: the stats struct if desired
+ * @bufsize: maximum message size for @prog
+ * @threadfn: a function to service RPC requests for @prog
+ *
+ * Returns an instantiated struct svc_serv object or NULL.
+ */
+ struct svc_serv *svc_create_pooled(struct svc_program *prog,
++ struct svc_stat *stats,
+ unsigned int bufsize,
+ int (*threadfn)(void *data))
+ {
+ struct svc_serv *serv;
+ unsigned int npools = svc_pool_map_get();
+
+- serv = __svc_create(prog, bufsize, npools, threadfn);
++ serv = __svc_create(prog, stats, bufsize, npools, threadfn);
+ if (!serv)
+ goto out_err;
+ return serv;
--- /dev/null
+From stable+bounces-73634-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:19 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:55 -0400
+Subject: sunrpc: remove ->pg_stats from svc_program
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-14-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 3f6ef182f144dcc9a4d942f97b6a8ed969f13c95 ]
+
+Now that this isn't used anywhere, remove it.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+[ cel: adjusted to apply to v5.10.y ]
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfssvc.c | 1 -
+ include/linux/sunrpc/svc.h | 1 -
+ 2 files changed, 2 deletions(-)
+
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -136,7 +136,6 @@ struct svc_program nfsd_program = {
+ .pg_vers = nfsd_version, /* version table */
+ .pg_name = "nfsd", /* program name */
+ .pg_class = "nfsd", /* authentication class */
+- .pg_stats = &nfsd_svcstats, /* version table */
+ .pg_authenticate = &svc_set_client, /* export authentication */
+ .pg_init_request = nfsd_init_request,
+ .pg_rpcbind_set = nfsd_rpcbind_set,
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -410,7 +410,6 @@ struct svc_program {
+ const struct svc_version **pg_vers; /* version array */
+ char * pg_name; /* service name */
+ char * pg_class; /* class name: services sharing authentication */
+- struct svc_stat * pg_stats; /* rpc statistics */
+ int (*pg_authenticate)(struct svc_rqst *);
+ __be32 (*pg_init_request)(struct svc_rqst *,
+ const struct svc_program *,
--- /dev/null
+From stable+bounces-73636-greg=kroah.com@vger.kernel.org Thu Sep 5 17:32:29 2024
+From: cel@kernel.org
+Date: Thu, 5 Sep 2024 11:30:56 -0400
+Subject: sunrpc: use the struct net as the svc proc private
+To: <stable@vger.kernel.org>
+Cc: <linux-nfs@vger.kernel.org>, Petr Vorel <pvorel@suse.cz>, sherry.yang@oracle.com, calum.mackay@oracle.com, kernel-team@fb.com, Josef Bacik <josef@toxicpanda.com>, Jeff Layton <jlayton@kernel.org>
+Message-ID: <20240905153101.59927-15-cel@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 418b9687dece5bd763c09b5c27a801a7e3387be9 ]
+
+nfsd is the only thing using this helper, and it doesn't currently use
+the proc private data. When we switch to per-network namespace stats we
+will need the struct net * in order to get to the nfsd_net. Use the net as
+the proc private so we can make use of it when we make the switch.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/stats.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(rpc_proc_unregister);
+ struct proc_dir_entry *
+ svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
+ {
+- return do_register(net, statp->program->pg_name, statp, proc_ops);
++ return do_register(net, statp->program->pg_name, net, proc_ops);
+ }
+ EXPORT_SYMBOL_GPL(svc_proc_register);
+