--- /dev/null
+From 5bd8baab087dff657e05387aee802e70304cc813 Mon Sep 17 00:00:00 2001
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Wed, 13 Apr 2022 10:10:50 +0200
+Subject: esp: limit skb_page_frag_refill use to a single page
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+commit 5bd8baab087dff657e05387aee802e70304cc813 upstream.
+
+Commit ebe48d368e97 ("esp: Fix possible buffer overflow in ESP
+transformation") tried to fix skb_page_frag_refill usage in ESP by
+capping allocsz to 32k, but that doesn't completely solve the issue:
+skb_page_frag_refill may still return a single page even when a much
+larger allocation was requested. If that happens, we write out of
+bounds despite the check introduced in the previous patch.
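+
+For illustration, a condensed sketch of the fallback in
+skb_page_frag_refill() (net/core/sock.c) that makes this possible; the
+page-reuse path and some gfp details are elided:
+
+    /* sketch: try a high-order page first, fall back to order-0 */
+    pfrag->offset = 0;
+    if (SKB_FRAG_PAGE_ORDER) {
+        pfrag->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN |
+                                  __GFP_NORETRY, SKB_FRAG_PAGE_ORDER);
+        if (likely(pfrag->page)) {
+            pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
+            return true;
+        }
+    }
+    pfrag->page = alloc_page(gfp);
+    if (likely(pfrag->page)) {
+        /* success reported even if the caller asked for much more */
+        pfrag->size = PAGE_SIZE;
+        return true;
+    }
+    return false;
+
+The 32k cap never sees this fallback: the caller gets "true" with only
+PAGE_SIZE bytes of room, and the trailer write that follows overruns
+the page.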
+
+This patch forces COW in cases where we would end up calling
+skb_page_frag_refill with a size larger than a page (first in
+esp_output_head with tailen, then in esp_output_tail with
+skb->data_len).
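+
+As a worked example (assuming PAGE_SIZE == 4096 and L1_CACHE_BYTES ==
+64): tailen == 4000 gives ALIGN(4000, 64) == 4032 <= PAGE_SIZE, so a
+single page is known to be enough, whereas tailen == 5000 gives
+ALIGN(5000, 64) == 5056 > PAGE_SIZE, so we now take the cow path
+instead of gambling on what skb_page_frag_refill hands back.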
+
+Fixes: cac2661c53f3 ("esp4: Avoid skb_cow_data whenever possible")
+Fixes: 03e2a30f6a27 ("esp6: Avoid skb_cow_data whenever possible")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/esp.h | 2 --
+ net/ipv4/esp4.c | 5 ++---
+ net/ipv6/esp6.c | 5 ++---
+ 3 files changed, 4 insertions(+), 8 deletions(-)
+
+--- a/include/net/esp.h
++++ b/include/net/esp.h
+@@ -4,8 +4,6 @@
+
+ #include <linux/skbuff.h>
+
+-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
+-
+ struct ip_esp_hdr;
+
+ static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -275,7 +275,6 @@ int esp_output_head(struct xfrm_state *x
+ struct page *page;
+ struct sk_buff *trailer;
+ int tailen = esp->tailen;
+- unsigned int allocsz;
+
+ /* this is non-NULL only with UDP Encapsulation */
+ if (x->encap) {
+@@ -285,8 +284,8 @@ int esp_output_head(struct xfrm_state *x
+ return err;
+ }
+
+- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
+ goto cow;
+
+ if (!skb_cloned(skb)) {
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -241,10 +241,9 @@ int esp6_output_head(struct xfrm_state *
+ struct page *page;
+ struct sk_buff *trailer;
+ int tailen = esp->tailen;
+- unsigned int allocsz;
+
+- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
++ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
++ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
+ goto cow;
+
+ if (!skb_cloned(skb)) {
--- /dev/null
+From eeaa345e128515135ccb864c04482180c08e3259 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Wed, 8 Jun 2022 20:22:05 +0200
+Subject: mm/slub: add missing TID updates on slab deactivation
+
+From: Jann Horn <jannh@google.com>
+
+commit eeaa345e128515135ccb864c04482180c08e3259 upstream.
+
+The fastpath in slab_alloc_node() assumes that c->slab is stable as long as
+the TID stays the same. However, two places in __slab_alloc() currently
+don't update the TID when deactivating the CPU slab.
+
+If multiple operations race the right way, this could lead to an object
+getting lost; or, in an even more unlikely scenario, to an object being
+freed onto the wrong slab's freelist, messing up the `inuse` counter
+and eventually causing a page to be freed to the page allocator while
+it still contains slab objects.
+
+(I haven't actually tested these cases, though; this is just based on
+reading the code. Writing test cases for this stuff seems like it'd be
+a pain...)
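+
+For reference, a condensed sketch of the fastpath in question
+(simplified from slab_alloc_node() in mm/slub.c; note that in this
+kernel the cpu slab field is c->page rather than c->slab):
+
+    tid = READ_ONCE(c->tid);
+    object = c->freelist;
+    /* slowpath taken when the freelist or cpu slab look unusable */
+    if (this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
+                                object, tid,
+                                get_freepointer_safe(s, object),
+                                next_tid(tid)))
+        /* relies on the cpu slab being unchanged since tid was read */
+        return object;
+
+The cmpxchg covers only c->freelist and c->tid, so deactivating the
+cpu slab without bumping the TID lets a stale fastpath commit against
+a cpu slab that has since been replaced or cleared.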
+
+The race leading to state inconsistency is (all operations on the same CPU
+and kmem_cache):
+
+ - task A: begin do_slab_free():
+   - read TID
+   - read pcpu freelist (==NULL)
+   - check `slab == c->slab` (true)
+ - [PREEMPT A->B]
+ - task B: begin slab_alloc_node():
+   - fastpath fails (`c->freelist` is NULL)
+   - enter __slab_alloc()
+   - slub_get_cpu_ptr() (disables preemption)
+   - enter ___slab_alloc()
+     - take local_lock_irqsave()
+     - read c->freelist as NULL
+     - get_freelist() returns NULL
+     - write `c->slab = NULL`
+     - drop local_unlock_irqrestore()
+     - goto new_slab
+     - slub_percpu_partial() is NULL
+     - get_partial() returns NULL
+   - slub_put_cpu_ptr() (enables preemption)
+ - [PREEMPT B->A]
+ - task A: finish do_slab_free():
+   - this_cpu_cmpxchg_double() succeeds
+   - [CORRUPT STATE: c->slab==NULL, c->freelist!=NULL]
+
+From there, the object on c->freelist will get lost if task B is allowed to
+continue from here: It will proceed to the retry_load_slab label,
+set c->slab, then jump to load_freelist, which clobbers c->freelist.
+
+But if we instead continue as follows, we get worse corruption:
+
+ - task A: run __slab_free() on object from other struct slab:
+   - CPU_PARTIAL_FREE case (slab was on no list, is now on pcpu partial)
+ - task A: run slab_alloc_node() with NUMA node constraint:
+   - fastpath fails (c->slab is NULL)
+   - call __slab_alloc()
+   - slub_get_cpu_ptr() (disables preemption)
+   - enter ___slab_alloc()
+     - c->slab is NULL: goto new_slab
+     - slub_percpu_partial() is non-NULL
+     - set c->slab to slub_percpu_partial(c)
+     - [CORRUPT STATE: c->slab points to slab-1, c->freelist has objects
+       from slab-2]
+     - goto redo
+     - node_match() fails
+     - goto deactivate_slab
+     - existing c->freelist is passed into deactivate_slab()
+     - inuse count of slab-1 is decremented to account for object from
+       slab-2
+
+At this point, the inuse count of slab-1 is 1 lower than it should be.
+This means that if we free all allocated objects in slab-1 except for one,
+SLUB will think that slab-1 is completely unused, and may free its page,
+leading to use-after-free.
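+
+(Sketch of that last step, condensed from the __slab_free() slowpath,
+where `page` is slab-1 and `n` is its node:
+
+    new.inuse -= cnt;
+    ...
+    if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
+        goto slab_empty;  /* discard_slab(): page freed with a live object */
+
+so the undercounted inuse is all discard_slab() needs to hand the page
+back to the page allocator prematurely.)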
+
+Fixes: c17dda40a6a4e ("slub: Separate out kmem_cache_cpu processing from deactivate_slab")
+Fixes: 03e404af26dc2 ("slub: fast release on full slab")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jann Horn <jannh@google.com>
+Acked-by: Christoph Lameter <cl@linux.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Reviewed-by: Muchun Song <songmuchun@bytedance.com>
+Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Link: https://lore.kernel.org/r/20220608182205.2945720-1-jannh@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2162,6 +2162,7 @@ redo:
+
+ c->page = NULL;
+ c->freelist = NULL;
++ c->tid = next_tid(c->tid);
+ }
+
+ /*
+@@ -2295,8 +2296,6 @@ static inline void flush_slab(struct kme
+ {
+ stat(s, CPUSLAB_FLUSH);
+ deactivate_slab(s, c->page, c->freelist, c);
+-
+- c->tid = next_tid(c->tid);
+ }
+
+ /*
+@@ -2583,6 +2582,7 @@ redo:
+
+ if (!freelist) {
+ c->page = NULL;
++ c->tid = next_tid(c->tid);
+ stat(s, DEACTIVATE_BYPASS);
+ goto new_slab;
+ }