git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 9 Dec 2023 12:37:44 +0000 (13:37 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 9 Dec 2023 12:37:44 +0000 (13:37 +0100)
added patches:
packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch

queue-4.14/packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch b/queue-4.14/packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch
new file mode 100644 (file)
index 0000000..e45d573
--- /dev/null
+++ b/queue-4.14/packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch
@@ -0,0 +1,109 @@
+From db3fadacaf0c817b222090290d06ca2a338422d0 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 1 Dec 2023 14:10:21 +0100
+Subject: packet: Move reference count in packet_sock to atomic_long_t
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit db3fadacaf0c817b222090290d06ca2a338422d0 upstream.
+
+In some potential instances the reference count on struct packet_sock
+could be saturated and cause an overflow, which gets the kernel a bit
+confused. To prevent this, move to a 64-bit atomic reference count on
+64-bit architectures so that this type cannot overflow in practice.
+
+Because we cannot handle saturation, using refcount_t is not possible
+here. Maybe someday in the future, if that changes, it could be used.
+Also, instead of plain atomic64_t, use atomic_long_t: 32-bit machines
+tend to be memory-limited (i.e. anything that increases a reference
+uses so much memory that you can't actually get to 2**32 references),
+and 32-bit architectures also tend to have serious problems with 64-bit
+atomics. Hence, atomic_long_t is the more natural solution.
+
+Reported-by: "The UK's National Cyber Security Centre (NCSC)" <security@ncsc.gov.uk>
+Co-developed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: stable@kernel.org
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20231201131021.19999-1-daniel@iogearbox.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c |   16 ++++++++--------
+ net/packet/internal.h  |    2 +-
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4252,7 +4252,7 @@ static void packet_mm_open(struct vm_are
+       struct sock *sk = sock->sk;
+       if (sk)
+-              atomic_inc(&pkt_sk(sk)->mapped);
++              atomic_long_inc(&pkt_sk(sk)->mapped);
+ }
+ static void packet_mm_close(struct vm_area_struct *vma)
+@@ -4262,7 +4262,7 @@ static void packet_mm_close(struct vm_ar
+       struct sock *sk = sock->sk;
+       if (sk)
+-              atomic_dec(&pkt_sk(sk)->mapped);
++              atomic_long_dec(&pkt_sk(sk)->mapped);
+ }
+ static const struct vm_operations_struct packet_mmap_ops = {
+@@ -4357,7 +4357,7 @@ static int packet_set_ring(struct sock *
+       err = -EBUSY;
+       if (!closing) {
+-              if (atomic_read(&po->mapped))
++              if (atomic_long_read(&po->mapped))
+                       goto out;
+               if (packet_read_pending(rb))
+                       goto out;
+@@ -4460,7 +4460,7 @@ static int packet_set_ring(struct sock *
+       err = -EBUSY;
+       mutex_lock(&po->pg_vec_lock);
+-      if (closing || atomic_read(&po->mapped) == 0) {
++      if (closing || atomic_long_read(&po->mapped) == 0) {
+               err = 0;
+               spin_lock_bh(&rb_queue->lock);
+               swap(rb->pg_vec, pg_vec);
+@@ -4478,9 +4478,9 @@ static int packet_set_ring(struct sock *
+               po->prot_hook.func = (po->rx_ring.pg_vec) ?
+                                               tpacket_rcv : packet_rcv;
+               skb_queue_purge(rb_queue);
+-              if (atomic_read(&po->mapped))
+-                      pr_err("packet_mmap: vma is busy: %d\n",
+-                             atomic_read(&po->mapped));
++              if (atomic_long_read(&po->mapped))
++                      pr_err("packet_mmap: vma is busy: %ld\n",
++                             atomic_long_read(&po->mapped));
+       }
+       mutex_unlock(&po->pg_vec_lock);
+@@ -4558,7 +4558,7 @@ static int packet_mmap(struct file *file
+               }
+       }
+-      atomic_inc(&po->mapped);
++      atomic_long_inc(&po->mapped);
+       vma->vm_ops = &packet_mmap_ops;
+       err = 0;
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -125,7 +125,7 @@ struct packet_sock {
+       __be16                  num;
+       struct packet_rollover  *rollover;
+       struct packet_mclist    *mclist;
+-      atomic_t                mapped;
++      atomic_long_t           mapped;
+       enum tpacket_versions   tp_version;
+       unsigned int            tp_hdrlen;
+       unsigned int            tp_reserve;
diff --git a/queue-4.14/series b/queue-4.14/series
index 4687cfb09d4063f64218ca743e57c81b51071a9d..e11af40c116e75aae95dd18702be1b7c12b0aeca 100644 (file)
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -13,3 +13,4 @@ nilfs2-prevent-warning-in-nilfs_sufile_set_segment_usage.patch
 tracing-always-update-snapshot-buffer-size.patch
 tracing-fix-incomplete-locking-when-disabling-buffered-events.patch
 tracing-fix-a-possible-race-when-disabling-buffered-events.patch
+packet-move-reference-count-in-packet_sock-to-atomic_long_t.patch
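
For illustration only, not part of the queued patch: a minimal userspace C sketch of the
reasoning in the commit message above. C11 <stdatomic.h> counters stand in for the kernel's
atomic_t and atomic_long_t; the variable names and the saturated starting value are invented
for the example. On an LP64 machine it shows how a saturated 32-bit "mapped" count wraps back
to zero on the next increment, while a long-sized count simply keeps counting.

/*
 * Userspace sketch (not kernel code): C11 atomics stand in for the
 * kernel's atomic_t / atomic_long_t to show why the "mapped" counter
 * is widened. A 32-bit counter wraps to 0 after 2**32 increments, so a
 * saturated refcount can look like "no mappings"; a long counter on an
 * LP64 system cannot realistically be wrapped.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_uint  mapped32 = UINT_MAX;  /* 32-bit counter, already saturated     */
	atomic_ulong mapped64 = UINT_MAX;  /* long-sized counter at the same value  */

	/* One more mmap()-style reference taken on each counter. */
	atomic_fetch_add(&mapped32, 1);    /* wraps to 0: mappings "disappear"      */
	atomic_fetch_add(&mapped64, 1);    /* keeps counting on LP64: 4294967296    */

	printf("32-bit counter after wrap: %u\n",  atomic_load(&mapped32));
	printf("long counter after add:    %lu\n", atomic_load(&mapped64));
	return 0;
}

The corresponding detail inside the patch itself is the pr_err() format string, which switches
from %d to %ld once the value is read with atomic_long_read().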