--- /dev/null
+From af6d10345ca76670c1b7c37799f0d5576ccef277 Mon Sep 17 00:00:00 2001
+From: Jon Maxwell <jmaxwell37@gmail.com>
+Date: Thu, 12 Jan 2023 12:25:32 +1100
+Subject: ipv6: remove max_size check inline with ipv4
+
+From: Jon Maxwell <jmaxwell37@gmail.com>
+
+commit af6d10345ca76670c1b7c37799f0d5576ccef277 upstream.
+
+In ip6_dst_gc() replace:
+
+ if (entries > gc_thresh)
+
+With:
+
+ if (entries > ops->gc_thresh)
+
+Sending IPv6 packets in a loop via a raw socket triggers an issue where a
+route is cloned by ip6_rt_cache_alloc() for each packet sent. This quickly
+exhausts the IPv6 max_size threshold, which defaults to 4096, resulting in
+these warnings:
+
+[1] 99.187805] dst_alloc: 7728 callbacks suppressed
+[2] Route cache is full: consider increasing sysctl net.ipv6.route.max_size.
+.
+.
+[300] Route cache is full: consider increasing sysctl net.ipv6.route.max_size.
+
+When this happens the packet is dropped and sendto() returns a "network is
+unreachable" error (errno 101):
+
+remaining pkt 200557 errno 101
+remaining pkt 196462 errno 101
+.
+.
+remaining pkt 126821 errno 101
+
+Implement David Ahern's suggestion to remove the max_size check, given that
+IPv6 has a GC to manage memory usage. IPv4 already does not check max_size.
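+
+For illustration only, here is a minimal userspace model of the allocation-
+time check before and after this change (model_ops and the two helpers are
+made-up names; the real logic is the dst_alloc() hunk below):
+
+ struct model_ops {
+         unsigned int gc_thresh;                  /* like ops->gc_thresh */
+         unsigned int entries;                    /* like dst_entries_get_fast() */
+         unsigned int (*gc)(struct model_ops *);  /* returns entries left after GC */
+ };
+
+ /* Old behaviour: GC runs past gc_thresh, and the allocation fails when GC
+  * cannot bring the count back under max_size ("Route cache is full"). */
+ static int alloc_allowed_old(struct model_ops *ops, unsigned int max_size)
+ {
+         if (ops->entries > ops->gc_thresh && ops->gc(ops) > max_size)
+                 return 0;
+         return 1;
+ }
+
+ /* New behaviour: crossing gc_thresh still kicks GC, but the allocation
+  * itself no longer fails on the route count. */
+ static int alloc_allowed_new(struct model_ops *ops)
+ {
+         if (ops->entries > ops->gc_thresh)
+                 ops->gc(ops);
+         return 1;
+ }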
+
+Here are some memory comparisons for IPv4 vs IPv6 with the patch:
+
+Test by running 5 instances of a program that sends UDP packets via a raw
+socket 5000000 times. Compare IPv4 and IPv6 performance with a similar
+program; a sketch of such a sender is shown below.
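+
+The sketch below is hypothetical (the actual test program is not part of
+this patch): the destination address, ports and zero UDP checksum are
+placeholders, and the raw socket requires CAP_NET_RAW:
+
+ #include <arpa/inet.h>
+ #include <errno.h>
+ #include <netinet/in.h>
+ #include <stdio.h>
+ #include <sys/socket.h>
+
+ int main(void)
+ {
+         /* Minimal UDP header: src/dst port 12345, length 8, checksum 0. */
+         unsigned char pkt[8] = { 0x30, 0x39, 0x30, 0x39, 0x00, 0x08, 0x00, 0x00 };
+         struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };
+         long remaining = 5000000;
+         int fd;
+
+         fd = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP);
+         if (fd < 0) {
+                 perror("socket");
+                 return 1;
+         }
+         inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
+
+         for (; remaining > 0; remaining--) {
+                 if (sendto(fd, pkt, sizeof(pkt), 0,
+                            (struct sockaddr *)&dst, sizeof(dst)) < 0)
+                         printf("remaining pkt %ld errno %d\n", remaining, errno);
+         }
+         return 0;
+ }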
+
+IPv4:
+
+Before test:
+
+MemFree: 29427108 kB
+Slab: 237612 kB
+
+ip6_dst_cache 1912 2528 256 32 2 : tunables 0 0 0
+xfrm_dst_cache 0 0 320 25 2 : tunables 0 0 0
+ip_dst_cache 2881 3990 192 42 2 : tunables 0 0 0
+
+During test:
+
+MemFree: 29417608 kB
+Slab: 247712 kB
+
+ip6_dst_cache 1912 2528 256 32 2 : tunables 0 0 0
+xfrm_dst_cache 0 0 320 25 2 : tunables 0 0 0
+ip_dst_cache 44394 44394 192 42 2 : tunables 0 0 0
+
+After test:
+
+MemFree: 29422308 kB
+Slab: 238104 kB
+
+ip6_dst_cache 1912 2528 256 32 2 : tunables 0 0 0
+xfrm_dst_cache 0 0 320 25 2 : tunables 0 0 0
+ip_dst_cache 3048 4116 192 42 2 : tunables 0 0 0
+
+IPv6 with patch:
+
+Errno 101 errors are no longer observed with the patch.
+
+Before test:
+
+MemFree: 29422308 kB
+Slab: 238104 kB
+
+ip6_dst_cache 1912 2528 256 32 2 : tunables 0 0 0
+xfrm_dst_cache 0 0 320 25 2 : tunables 0 0 0
+ip_dst_cache 3048 4116 192 42 2 : tunables 0 0 0
+
+During test:
+
+MemFree: 29431516 kB
+Slab: 240940 kB
+
+ip6_dst_cache 11980 12064 256 32 2 : tunables 0 0 0
+xfrm_dst_cache 0 0 320 25 2 : tunables 0 0 0
+ip_dst_cache 3048 4116 192 42 2 : tunables 0 0 0
+
+After test:
+
+MemFree: 29441816 kB
+Slab: 238132 kB
+
+ip6_dst_cache 1902 2432 256 32 2 : tunables 0 0 0
+xfrm_dst_cache 0 0 320 25 2 : tunables 0 0 0
+ip_dst_cache 3048 4116 192 42 2 : tunables 0 0 0
+
+Tested-by: Andrea Mayer <andrea.mayer@uniroma2.it>
+Signed-off-by: Jon Maxwell <jmaxwell37@gmail.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/20230112012532.311021-1-jmaxwell37@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/dst_ops.h | 2 +-
+ net/core/dst.c | 8 ++------
+ net/ipv6/route.c | 13 +++++--------
+ 3 files changed, 8 insertions(+), 15 deletions(-)
+
+--- a/include/net/dst_ops.h
++++ b/include/net/dst_ops.h
+@@ -16,7 +16,7 @@ struct dst_ops {
+ unsigned short family;
+ unsigned int gc_thresh;
+
+- int (*gc)(struct dst_ops *ops);
++ void (*gc)(struct dst_ops *ops);
+ struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
+ unsigned int (*default_advmss)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -82,12 +82,8 @@ void *dst_alloc(struct dst_ops *ops, str
+
+ if (ops->gc &&
+ !(flags & DST_NOCOUNT) &&
+- dst_entries_get_fast(ops) > ops->gc_thresh) {
+- if (ops->gc(ops)) {
+- pr_notice_ratelimited("Route cache is full: consider increasing sysctl net.ipv6.route.max_size.\n");
+- return NULL;
+- }
+- }
++ dst_entries_get_fast(ops) > ops->gc_thresh)
++ ops->gc(ops);
+
+ dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+ if (!dst)
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -91,7 +91,7 @@ static struct dst_entry *ip6_negative_ad
+ static void ip6_dst_destroy(struct dst_entry *);
+ static void ip6_dst_ifdown(struct dst_entry *,
+ struct net_device *dev, int how);
+-static int ip6_dst_gc(struct dst_ops *ops);
++static void ip6_dst_gc(struct dst_ops *ops);
+
+ static int ip6_pkt_discard(struct sk_buff *skb);
+ static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+@@ -3288,11 +3288,10 @@ out:
+ return dst;
+ }
+
+-static int ip6_dst_gc(struct dst_ops *ops)
++static void ip6_dst_gc(struct dst_ops *ops)
+ {
+ struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
+ int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
+- int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
+ int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
+ int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
+ unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
+@@ -3300,11 +3299,10 @@ static int ip6_dst_gc(struct dst_ops *op
+ int entries;
+
+ entries = dst_entries_get_fast(ops);
+- if (entries > rt_max_size)
++ if (entries > ops->gc_thresh)
+ entries = dst_entries_get_slow(ops);
+
+- if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
+- entries <= rt_max_size)
++ if (time_after(rt_last_gc + rt_min_interval, jiffies))
+ goto out;
+
+ fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
+@@ -3314,7 +3312,6 @@ static int ip6_dst_gc(struct dst_ops *op
+ out:
+ val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
+ atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
+- return entries > rt_max_size;
+ }
+
+ static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
+@@ -6517,7 +6514,7 @@ static int __net_init ip6_route_net_init
+ #endif
+
+ net->ipv6.sysctl.flush_delay = 0;
+- net->ipv6.sysctl.ip6_rt_max_size = 4096;
++ net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
+ net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
+ net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
+ net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
--- /dev/null
+From 172db56d90d29e47e7d0d64885d5dbd92c87ec42 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 6 Dec 2023 12:59:07 -0800
+Subject: netlink: Return unsigned value for nla_len()
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 172db56d90d29e47e7d0d64885d5dbd92c87ec42 upstream.
+
+The return value from nla_len() is never expected to be negative, and can
+never be more than struct nlattr::nla_len (a u16). Adjust the function's
+prototype accordingly. This lets GCC's value range optimization passes
+know that the return value can never be negative and can never be larger
+than a u16. As recently discussed[1], this silences the following warning
+in GCC 12+:
+
+net/wireless/nl80211.c: In function 'nl80211_set_cqm_rssi.isra':
+net/wireless/nl80211.c:12892:17: warning: 'memcpy' specified bound 18446744073709551615 exceeds maximum object size 9223372036854775807 [-Wstringop-overflow=]
+12892 | memcpy(cqm_config->rssi_thresholds, thresholds,
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+12893 | flex_array_size(cqm_config, rssi_thresholds,
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+12894 | n_thresholds));
+ | ~~~~~~~~~~~~~~
+
+A future change would be to clamp the subtraction to make sure it never
+wraps around if nla_len is somehow less than NLA_HDRLEN, which would
+have the additional benefit of being defensive in the face of nlattr
+corruption or logic errors.
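+
+A sketch of that possible follow-up, written here as a hypothetical
+userspace helper against the uapi definitions (it is not part of this
+patch):
+
+ #include <sys/socket.h>
+ #include <linux/netlink.h>      /* struct nlattr, NLA_HDRLEN */
+
+ /* Clamped variant: returns 0 rather than wrapping around when a corrupted
+  * attribute reports nla_len smaller than NLA_HDRLEN. */
+ static inline __u16 nla_len_clamped(const struct nlattr *nla)
+ {
+         return nla->nla_len > NLA_HDRLEN ? nla->nla_len - NLA_HDRLEN : 0;
+ }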
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202311090752.hWcJWAHL-lkp@intel.com/ [1]
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Cc: Jeff Johnson <quic_jjohnson@quicinc.com>
+Cc: Michael Walle <mwalle@kernel.org>
+Cc: Max Schulze <max.schulze@online.de>
+Link: https://lore.kernel.org/r/20231202202539.it.704-kees@kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20231206205904.make.018-kees@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/netlink.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -1181,7 +1181,7 @@ static inline void *nla_data(const struc
+ * nla_len - length of payload
+ * @nla: netlink attribute
+ */
+-static inline int nla_len(const struct nlattr *nla)
++static inline u16 nla_len(const struct nlattr *nla)
+ {
+ return nla->nla_len - NLA_HDRLEN;
+ }
--- /dev/null
+From 64e6304169f1e1f078e7f0798033f80a7fb0ea46 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Wed, 3 Jan 2024 08:36:52 -0500
+Subject: nfsd: drop the nfsd_put helper
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 64e6304169f1e1f078e7f0798033f80a7fb0ea46 upstream.
+
+It's not safe to call nfsd_put once nfsd_last_thread has been called, as
+that function will zero out the nn->nfsd_serv pointer.
+
+Drop the nfsd_put helper altogether and open-code the svc_put in its
+callers instead. That way, the error-handling paths no longer rely on the
+value of that pointer.
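+
+The pattern, as a simplified model (not the nfsd code itself; the types and
+helpers here are stand-ins): snapshot the serv pointer before anything that
+can clear nn->nfsd_serv, and drop the reference through that snapshot:
+
+ #include <stddef.h>
+
+ struct model_serv { int refcount; };
+ struct model_net  { struct model_serv *nfsd_serv; };
+
+ static void model_last_thread(struct model_net *nn)
+ {
+         nn->nfsd_serv = NULL;   /* the pointer the old nfsd_put() relied on */
+ }
+
+ static void model_error_path(struct model_net *nn)
+ {
+         struct model_serv *serv = nn->nfsd_serv;        /* snapshot first */
+
+         model_last_thread(nn);  /* may zero nn->nfsd_serv ... */
+         serv->refcount--;       /* ... the cached pointer stays valid (svc_put) */
+ }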
+
+Fixes: 2a501f55cd64 ("nfsd: call nfsd_last_thread() before final nfsd_put()")
+Reported-by: Zhi Li <yieli@redhat.com>
+Cc: NeilBrown <neilb@suse.de>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfsctl.c | 31 +++++++++++++++++--------------
+ fs/nfsd/nfsd.h | 7 -------
+ 2 files changed, 17 insertions(+), 21 deletions(-)
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -705,6 +705,7 @@ static ssize_t __write_ports_addfd(char
+ char *mesg = buf;
+ int fd, err;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++ struct svc_serv *serv;
+
+ err = get_int(&mesg, &fd);
+ if (err != 0 || fd < 0)
+@@ -714,15 +715,15 @@ static ssize_t __write_ports_addfd(char
+ if (err != 0)
+ return err;
+
+- err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
++ serv = nn->nfsd_serv;
++ err = svc_addsock(serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+
+- if (err < 0 && !nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
++ if (err < 0 && !serv->sv_nrthreads && !nn->keep_active)
+ nfsd_last_thread(net);
+- else if (err >= 0 &&
+- !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+- svc_get(nn->nfsd_serv);
++ else if (err >= 0 && !serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++ svc_get(serv);
+
+- nfsd_put(net);
++ svc_put(serv);
+ return err;
+ }
+
+@@ -736,6 +737,7 @@ static ssize_t __write_ports_addxprt(cha
+ struct svc_xprt *xprt;
+ int port, err;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
++ struct svc_serv *serv;
+
+ if (sscanf(buf, "%15s %5u", transport, &port) != 2)
+ return -EINVAL;
+@@ -747,32 +749,33 @@ static ssize_t __write_ports_addxprt(cha
+ if (err != 0)
+ return err;
+
+- err = svc_xprt_create(nn->nfsd_serv, transport, net,
++ serv = nn->nfsd_serv;
++ err = svc_xprt_create(serv, transport, net,
+ PF_INET, port, SVC_SOCK_ANONYMOUS, cred);
+ if (err < 0)
+ goto out_err;
+
+- err = svc_xprt_create(nn->nfsd_serv, transport, net,
++ err = svc_xprt_create(serv, transport, net,
+ PF_INET6, port, SVC_SOCK_ANONYMOUS, cred);
+ if (err < 0 && err != -EAFNOSUPPORT)
+ goto out_close;
+
+- if (!nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+- svc_get(nn->nfsd_serv);
++ if (!serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
++ svc_get(serv);
+
+- nfsd_put(net);
++ svc_put(serv);
+ return 0;
+ out_close:
+- xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port);
++ xprt = svc_find_xprt(serv, transport, net, PF_INET, port);
+ if (xprt != NULL) {
+ svc_xprt_close(xprt);
+ svc_xprt_put(xprt);
+ }
+ out_err:
+- if (!nn->nfsd_serv->sv_nrthreads && !nn->keep_active)
++ if (!serv->sv_nrthreads && !nn->keep_active)
+ nfsd_last_thread(net);
+
+- nfsd_put(net);
++ svc_put(serv);
+ return err;
+ }
+
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -97,13 +97,6 @@ int nfsd_pool_stats_open(struct inode *
+ int nfsd_pool_stats_release(struct inode *, struct file *);
+ void nfsd_shutdown_threads(struct net *net);
+
+-static inline void nfsd_put(struct net *net)
+-{
+- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-
+- svc_put(nn->nfsd_serv);
+-}
+-
+ bool i_am_nfsd(void);
+
+ struct nfsdfs_client {
--- /dev/null
+nfsd-drop-the-nfsd_put-helper.patch
+netlink-return-unsigned-value-for-nla_len.patch
+ipv6-remove-max_size-check-inline-with-ipv4.patch