git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Linux 2.6.16.15 release (tag: v2.6.16.15)
author    Chris Wright <chrisw@sous-sol.org>
          Tue, 9 May 2006 19:51:38 +0000 (12:51 -0700)
committer Chris Wright <chrisw@sous-sol.org>
          Tue, 9 May 2006 19:51:38 +0000 (12:51 -0700)
- add SCTP fixes for CVE-2006-2274 and CVE-2006-2275, for a total of 4
  SCTP security fixes

releases/2.6.16.15/sctp-allow-spillover-of-receive-buffer-to-avoid-deadlock.patch [new file with mode: 0644]
releases/2.6.16.15/sctp-prevent-possible-infinite-recursion-with-multiple-bundled-data.patch [new file with mode: 0644]
releases/2.6.16.15/series

diff --git a/releases/2.6.16.15/sctp-allow-spillover-of-receive-buffer-to-avoid-deadlock.patch b/releases/2.6.16.15/sctp-allow-spillover-of-receive-buffer-to-avoid-deadlock.patch
new file mode 100644
index 0000000..5c534c1
--- /dev/null
+++ b/releases/2.6.16.15/sctp-allow-spillover-of-receive-buffer-to-avoid-deadlock.patch
@@ -0,0 +1,138 @@
+From nobody Mon Sep 17 00:00:00 2001
+From: Neil Horman <nhorman@tuxdriver.com>
+Date: Fri, 5 May 2006 17:02:09 -0700
+Subject: [PATCH] SCTP: Allow spillover of receive buffer to avoid deadlock. (CVE-2006-2275)
+
+This patch fixes a deadlock situation in the receive path by allowing
+temporary spillover of the receive buffer.
+
+- If the chunk we receive has a tsn that immediately follows the ctsn,
+  accept it even if we run out of receive buffer space and renege data with
+  higher TSNs.
+- Once we accept one chunk in a packet, accept all the remaining chunks
+  even if we run out of receive buffer space.
+
+Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Mark Butler <butlerm@middle.net>
+Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
+Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+
+---
+
+ include/net/sctp/structs.h |    1 +
+ net/sctp/inqueue.c         |    1 +
+ net/sctp/sm_statefuns.c    |   44 +++++++++++++++++++++++++++++++++++---------
+ 3 files changed, 37 insertions(+), 9 deletions(-)
+
+--- linux-2.6.16.14.orig/include/net/sctp/structs.h
++++ linux-2.6.16.14/include/net/sctp/structs.h
+@@ -702,6 +702,7 @@ struct sctp_chunk {
+       __u8 tsn_gap_acked;     /* Is this chunk acked by a GAP ACK? */
+       __s8 fast_retransmit;    /* Is this chunk fast retransmitted? */
+       __u8 tsn_missing_report; /* Data chunk missing counter. */
++      __u8 data_accepted;     /* At least 1 chunk in this packet accepted */
+ };
+ void sctp_chunk_hold(struct sctp_chunk *);
+--- linux-2.6.16.14.orig/net/sctp/inqueue.c
++++ linux-2.6.16.14/net/sctp/inqueue.c
+@@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct s
+               /* This is the first chunk in the packet.  */
+               chunk->singleton = 1;
+               ch = (sctp_chunkhdr_t *) chunk->skb->data;
++              chunk->data_accepted = 0;
+       }
+         chunk->chunk_hdr = ch;
+--- linux-2.6.16.14.orig/net/sctp/sm_statefuns.c
++++ linux-2.6.16.14/net/sctp/sm_statefuns.c
+@@ -5154,7 +5154,9 @@ static int sctp_eat_data(const struct sc
+       int tmp;
+       __u32 tsn;
+       int account_value;
++      struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
+       struct sock *sk = asoc->base.sk;
++      int rcvbuf_over = 0;
+       data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
+       skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+@@ -5165,10 +5167,16 @@ static int sctp_eat_data(const struct sc
+       /* ASSERT:  Now skb->data is really the user data.  */
+       /*
+-       * if we are established, and we have used up our receive
+-       * buffer memory, drop the frame
++       * If we are established, and we have used up our receive buffer
++       * memory, think about dropping the frame.
++       * Note that we have an opportunity to improve performance here.
++       * If we accept one chunk from an skbuff, we have to keep all the
++       * memory of that skbuff around until the chunk is read into user
++       * space. Therefore, once we accept 1 chunk we may as well accept all
++       * remaining chunks in the skbuff. The data_accepted flag helps us do
++       * that.
+        */
+-      if (asoc->state == SCTP_STATE_ESTABLISHED) {
++      if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) {
+               /*
+                * If the receive buffer policy is 1, then each
+                * association can allocate up to sk_rcvbuf bytes
+@@ -5179,9 +5187,25 @@ static int sctp_eat_data(const struct sc
+                       account_value = atomic_read(&asoc->rmem_alloc);
+               else
+                       account_value = atomic_read(&sk->sk_rmem_alloc);
+-
+-              if (account_value > sk->sk_rcvbuf)
+-                      return SCTP_IERROR_IGNORE_TSN;
++              if (account_value > sk->sk_rcvbuf) {
++                      /*
++                       * We need to make forward progress, even when we are
++                       * under memory pressure, so we always allow the
++                       * next tsn after the ctsn ack point to be accepted.
++                       * This lets us avoid deadlocks in which we have to
++                       * drop frames that would otherwise let us drain the
++                       * receive queue.
++                       */
++                      if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn)
++                              return SCTP_IERROR_IGNORE_TSN;
++
++                      /*
++                       * We're going to accept the frame but we should renege
++                       * to make space for it. This will send us down that
++                       * path later in this function.
++                       */
++                      rcvbuf_over = 1;
++              }
+       }
+       /* Process ECN based congestion.
+@@ -5229,6 +5253,7 @@ static int sctp_eat_data(const struct sc
+       datalen -= sizeof(sctp_data_chunk_t);
+       deliver = SCTP_CMD_CHUNK_ULP;
++      chunk->data_accepted = 1;
+       /* Think about partial delivery. */
+       if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
+@@ -5245,7 +5270,8 @@ static int sctp_eat_data(const struct sc
+        * large spill over.
+        */
+       if (!asoc->rwnd || asoc->rwnd_over ||
+-          (datalen > asoc->rwnd + asoc->frag_point)) {
++          (datalen > asoc->rwnd + asoc->frag_point) ||
++          rcvbuf_over) {
+               /* If this is the next TSN, consider reneging to make
+                * room.   Note: Playing nice with a confused sender.  A
+@@ -5253,8 +5279,8 @@ static int sctp_eat_data(const struct sc
+                * space and in the future we may want to detect and
+                * do more drastic reneging.
+                */
+-              if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
+-                  (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
++              if (sctp_tsnmap_has_gap(map) &&
++                  (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
+                       SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
+                       deliver = SCTP_CMD_RENEGE;
+               } else {
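
The hunks above boil down to a single acceptance rule added to sctp_eat_data(). The standalone C program below is a minimal sketch of that rule, not kernel code: it assumes one per-packet flag mirroring chunk->data_accepted, and every name in it (eat_data_sketch, pkt_state, chunk_decision) is hypothetical and exists only to illustrate the decision the patch makes.

/*
 * Illustrative sketch only -- not the kernel code. Under receive-buffer
 * pressure, still accept the chunk whose TSN immediately follows the
 * cumulative TSN ack point (reneging higher TSNs to make room), and once
 * one chunk from a packet is accepted, accept the packet's remaining chunks.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum chunk_decision { ACCEPT, ACCEPT_AND_RENEGE, IGNORE_TSN };

struct pkt_state {
	bool data_accepted;	/* mirrors chunk->data_accepted in the patch */
};

static enum chunk_decision eat_data_sketch(struct pkt_state *pkt,
					   uint32_t tsn, uint32_t ctsn,
					   long rmem_alloc, long rcvbuf)
{
	/* A chunk earlier in this packet was already accepted: keep the
	 * rest, since the skbuff's memory is pinned until user space reads
	 * it anyway. */
	if (pkt->data_accepted)
		goto accept;

	if (rmem_alloc > rcvbuf) {
		/* Out of receive buffer.  Only the TSN right after the
		 * cumulative ack point may spill over; everything else is
		 * ignored so the sender retransmits it later. */
		if (tsn != ctsn + 1)
			return IGNORE_TSN;

		pkt->data_accepted = true;
		return ACCEPT_AND_RENEGE;	/* rcvbuf_over = 1 in the patch */
	}
accept:
	pkt->data_accepted = true;
	return ACCEPT;
}

int main(void)
{
	struct pkt_state pkt = { .data_accepted = false };

	/* Buffer full, but the chunk is ctsn+1: accepted, with reneging. */
	printf("%d\n", eat_data_sketch(&pkt, 101, 100, 70000, 65536));
	/* Later chunk from the same packet, higher TSN: now accepted too. */
	printf("%d\n", eat_data_sketch(&pkt, 102, 100, 70000, 65536));
	return 0;
}
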
diff --git a/releases/2.6.16.15/sctp-prevent-possible-infinite-recursion-with-multiple-bundled-data.patch b/releases/2.6.16.15/sctp-prevent-possible-infinite-recursion-with-multiple-bundled-data.patch
new file mode 100644
index 0000000..748b9cc
--- /dev/null
+++ b/releases/2.6.16.15/sctp-prevent-possible-infinite-recursion-with-multiple-bundled-data.patch
@@ -0,0 +1,75 @@
+From nobody Mon Sep 17 00:00:00 2001
+From: Vladislav Yasevich <vladislav.yasevich@hp.com>
+Date: Fri, 5 May 2006 17:03:49 -0700
+Subject: [PATCH] SCTP: Prevent possible infinite recursion with multiple bundled DATA. (CVE-2006-2274)
+
+There is a rare situation that causes lksctp to go into infinite recursion
+and crash the system.  The trigger is a packet that contains at least the
+first two DATA fragments of a message bundled together. The recursion is
+triggered when the user data buffer is smaller than the full data message.
+The problem is that we clone the skb for every fragment in the message.
+When reassembling the full message, we try to link skbs from the "first
+fragment" clone using the frag_list. However, since the frag_list is shared
+between two clones in this rare situation, we end up setting the frag_list
+pointer of the second fragment to point to itself.  This causes
+sctp_skb_pull() to potentially recurse indefinitely.
+
+Proposed solution is to make a copy of the skb when attempting to link
+things using frag_list.
+
+Signed-off-by: Vladislav Yasevich <vladislav.yasevich@hp.com>
+Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+
+---
+
+ net/sctp/ulpqueue.c |   27 +++++++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+--- linux-2.6.16.14.orig/net/sctp/ulpqueue.c
++++ linux-2.6.16.14/net/sctp/ulpqueue.c
+@@ -279,6 +279,7 @@ static inline void sctp_ulpq_store_reasm
+ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
+ {
+       struct sk_buff *pos;
++      struct sk_buff *new = NULL;
+       struct sctp_ulpevent *event;
+       struct sk_buff *pnext, *last;
+       struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
+@@ -297,11 +298,33 @@ static struct sctp_ulpevent *sctp_make_r
+        */
+       if (last)
+               last->next = pos;
+-      else
+-              skb_shinfo(f_frag)->frag_list = pos;
++      else {
++              if (skb_cloned(f_frag)) {
++                      /* This is a cloned skb, we can't just modify
++                       * the frag_list.  We need a new skb to do that.
++                       * Instead of calling skb_unshare(), we'll do it
++                       * ourselves since we need to delay the free.
++                       */
++                      new = skb_copy(f_frag, GFP_ATOMIC);
++                      if (!new)
++                              return NULL;    /* try again later */
++
++                      new->sk = f_frag->sk;
++
++                      skb_shinfo(new)->frag_list = pos;
++              } else
++                      skb_shinfo(f_frag)->frag_list = pos;
++      }
+       /* Remove the first fragment from the reassembly queue.  */
+       __skb_unlink(f_frag, queue);
++
++      /* if we did unshare, then free the old skb and re-assign */
++      if (new) {
++              kfree_skb(f_frag);
++              f_frag = new;
++      }
++
+       while (pos) {
+               pnext = pos->next;
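
The copy-before-link change above exists because skb_clone() leaves clones sharing one skb_shared_info block, so writing frag_list through the first-fragment clone is also visible through the other clones made from the same packet. The standalone C program below models that sharing with hypothetical types (fake_skb, shinfo, clone_of, copy_of); it is only a sketch of the idea, and shows how taking a private copy, as the patch does with skb_copy(), breaks the self-reference that made sctp_skb_pull() recurse.

/*
 * Illustrative sketch only -- not the kernel code.  Clones share one
 * shared-info block; a private copy gets its own.
 */
#include <stdio.h>

struct shinfo { struct fake_skb *frag_list; };

struct fake_skb {
	const char    *name;
	struct shinfo *shared;	/* clones point at the same shinfo */
};

/* "Clone": new header, same shared data, like skb_clone(). */
static struct fake_skb clone_of(struct fake_skb *skb, const char *name)
{
	struct fake_skb c = *skb;
	c.name = name;
	return c;
}

/* "Copy": private shared-info, like the skb_copy() the patch switches to. */
static struct fake_skb copy_of(struct fake_skb *skb, const char *name,
			       struct shinfo *fresh)
{
	struct fake_skb c = *skb;
	*fresh = *skb->shared;
	c.shared = fresh;
	c.name = name;
	return c;
}

int main(void)
{
	struct shinfo shared = { NULL }, private_info;
	struct fake_skb orig  = { "orig",  &shared };
	struct fake_skb frag1 = clone_of(&orig, "frag1");
	struct fake_skb frag2 = clone_of(&orig, "frag2");

	/* Buggy path: link frag2 via frag1's frag_list.  Because frag1 and
	 * frag2 share shinfo, frag2 now appears on its own frag_list. */
	frag1.shared->frag_list = &frag2;
	printf("frag2 lists itself: %s\n",
	       frag2.shared->frag_list == &frag2 ? "yes (recursion)" : "no");

	/* Fixed path: take a private copy first, then link. */
	shared.frag_list = NULL;
	struct fake_skb safe = copy_of(&frag1, "frag1-copy", &private_info);
	safe.shared->frag_list = &frag2;
	printf("frag2 lists itself: %s\n",
	       frag2.shared->frag_list == &frag2 ? "yes (recursion)" : "no");
	return 0;
}
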
diff --git a/releases/2.6.16.15/series b/releases/2.6.16.15/series
index e5ca588a67137956947e9633545d095557e7826d..4ebb21b22f4d6f18c7a849cea22681d84ca105d6 100644
--- a/releases/2.6.16.15/series
+++ b/releases/2.6.16.15/series
@@ -1,2 +1,4 @@
 fix-panic-s-when-receiving-fragmented-sctp-control-chunks.patch
 fix-state-table-entries-for-chunks-received-in-closed-state.patch
+sctp-allow-spillover-of-receive-buffer-to-avoid-deadlock.patch
+sctp-prevent-possible-infinite-recursion-with-multiple-bundled-data.patch