more .27 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Tue, 30 Jun 2009 00:24:56 +0000 (17:24 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Tue, 30 Jun 2009 00:24:56 +0000 (17:24 -0700)
queue-2.6.27/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch [new file with mode: 0644]
queue-2.6.27/series
queue-2.6.27/tcp-advertise-mss-requested-by-user.patch [new file with mode: 0644]

diff --git a/queue-2.6.27/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch b/queue-2.6.27/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
new file mode 100644
index 0000000..bff2813
--- /dev/null
+++ b/queue-2.6.27/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
@@ -0,0 +1,35 @@
+From 7a3ab908948b6296ee7e81d42f7c176361c51975 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 16 Jun 2009 16:00:33 -0700
+Subject: md/raid5: add missing call to schedule() after prepare_to_wait()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 7a3ab908948b6296ee7e81d42f7c176361c51975 upstream.
+
+In the unlikely event that reshape progresses past the current request
+while it is waiting for a stripe we need to schedule() before retrying
+for 2 reasons:
+1/ Prevent list corruption from duplicated list_add() calls without
+   intervening list_del().
+2/ Give the reshape code a chance to make some progress to resolve the
+   conflict.
+
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid5.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3431,6 +3431,7 @@ static int make_request(struct request_q
+                               spin_unlock_irq(&conf->device_lock);
+                               if (must_retry) {
+                                       release_stripe(sh);
++                                      schedule();
+                                       goto retry;
+                               }
+                       }
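For context on the one-liner above: prepare_to_wait() puts the task on the waitqueue and marks it as about to sleep, schedule() performs the actual sleep until a wakeup arrives, and finish_wait() takes the task back off the queue. Jumping straight back to retry skips the sleep, so prepare_to_wait() runs again with no intervening removal (the changelog's duplicated list_add()) and the conflicting reshape never gets CPU time to make progress. Below is a minimal sketch of that pattern, not the raid5.c code itself; the names my_wq, condition_met() and wait_for_condition() are illustrative assumptions.

#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

extern bool condition_met(void);        /* hypothetical predicate */

static void wait_for_condition(void)
{
        DEFINE_WAIT(w);

        for (;;) {
                /* Put the task on my_wq and mark it about to sleep. */
                prepare_to_wait(&my_wq, &w, TASK_UNINTERRUPTIBLE);
                if (condition_met())
                        break;
                /*
                 * Sleep until woken.  Retrying without this call is the
                 * bug being fixed: prepare_to_wait() runs again with no
                 * intervening removal, and the waker never gets a chance
                 * to resolve the conflict.
                 */
                schedule();
        }
        /* Dequeue the task and restore TASK_RUNNING. */
        finish_wait(&my_wq, &w);
}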
diff --git a/queue-2.6.27/series b/queue-2.6.27/series
index 854973987a5d7e4629bc84e63df53c55598f1844..d2cd4c967cf3e3d19f27a8b7383a60d9c7926190 100644
--- a/queue-2.6.27/series
+++ b/queue-2.6.27/series
@@ -17,3 +17,5 @@ ib-mlx4-add-strong-ordering-to-local-inval-and-fast-reg-work-requests.patch
 x86-handle-initrd-that-extends-into-unusable-memory.patch
 lockdep-select-frame-pointers-on-x86.patch
 send_sigio_to_task-sanitize-the-usage-of-fown-signum.patch
+md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
+tcp-advertise-mss-requested-by-user.patch
diff --git a/queue-2.6.27/tcp-advertise-mss-requested-by-user.patch b/queue-2.6.27/tcp-advertise-mss-requested-by-user.patch
new file mode 100644
index 0000000..6101b39
--- /dev/null
+++ b/queue-2.6.27/tcp-advertise-mss-requested-by-user.patch
@@ -0,0 +1,103 @@
+From f5fff5dc8a7a3f395b0525c02ba92c95d42b7390 Mon Sep 17 00:00:00 2001
+From: Tom Quetchenbach <virtualphtn@gmail.com>
+Date: Sun, 21 Sep 2008 00:21:51 -0700
+Subject: tcp: advertise MSS requested by user
+
+From: Tom Quetchenbach <virtualphtn@gmail.com>
+
+commit f5fff5dc8a7a3f395b0525c02ba92c95d42b7390 upstream.
+
+I'm trying to use the TCP_MAXSEG option to setsockopt() to set the MSS
+for both sides of a bidirectional connection.
+
+man tcp says: "If this option is set before connection establishment, it
+also changes the MSS value announced to the other end in the initial
+packet."
+
+However, the kernel only uses the MTU/route cache to set the advertised
+MSS. That means if I set the MSS to, say, 500 before calling connect(),
+I will send at most 500-byte packets, but I will still receive 1500-byte
+packets in reply.
+
+This is a bug, either in the kernel or the documentation.
+
+This patch (applies to latest net-2.6) reduces the advertised value to
+that requested by the user as long as setsockopt() is called before
+connect() or accept(). This seems like the behavior that one would
+expect as well as that which is documented.
+
+I've tried to make sure that things that depend on the advertised MSS
+are set correctly.
+
+Signed-off-by: Tom Quetchenbach <virtualphtn@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/tcp_ipv4.c   |    4 ++++
+ net/ipv4/tcp_output.c |   13 ++++++++++---
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1364,6 +1364,10 @@ struct sock *tcp_v4_syn_recv_sock(struct
+       tcp_mtup_init(newsk);
+       tcp_sync_mss(newsk, dst_mtu(dst));
+       newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
++      if (tcp_sk(sk)->rx_opt.user_mss &&
++          tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
++              newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
++
+       tcp_initialize_rcv_mss(newsk);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2252,6 +2252,7 @@ struct sk_buff *tcp_make_synack(struct s
+       struct sk_buff *skb;
+       struct tcp_md5sig_key *md5;
+       __u8 *md5_hash_location;
++      int mss;
+ 
+       skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+       if (skb == NULL)
+@@ -2262,13 +2263,17 @@ struct sk_buff *tcp_make_synack(struct s
+ 
+       skb->dst = dst_clone(dst);
+ 
++      mss = dst_metric(dst, RTAX_ADVMSS);
++      if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
++              mss = tp->rx_opt.user_mss;
++
+       if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
+               __u8 rcv_wscale;
+               /* Set this up on the first call only */
+               req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+               /* tcp_full_space because it is guaranteed to be the first packet */
+               tcp_select_initial_window(tcp_full_space(sk),
+-                      dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
++                      mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+                       &req->rcv_wnd,
+                       &req->window_clamp,
+                       ireq->wscale_ok,
+@@ -2283,8 +2288,7 @@ struct sk_buff *tcp_make_synack(struct s
+       else
+ #endif
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+-      tcp_header_size = tcp_synack_options(sk, req,
+-                                           dst_metric(dst, RTAX_ADVMSS),
++      tcp_header_size = tcp_synack_options(sk, req, mss,
+                                            skb, &opts, &md5) +
+                         sizeof(struct tcphdr);
+ 
+@@ -2353,6 +2357,9 @@ static void tcp_connect_init(struct sock
+       if (!tp->window_clamp)
+               tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
+       tp->advmss = dst_metric(dst, RTAX_ADVMSS);
++      if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
++              tp->advmss = tp->rx_opt.user_mss;
++
+       tcp_initialize_rcv_mss(sk);
+ 
+       tcp_select_initial_window(tcp_full_space(sk),
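To see the fixed behavior from userspace: with this patch, an MSS set via TCP_MAXSEG before connect() also caps the value advertised in the SYN, so the peer's replies shrink to match. A minimal sketch under stated assumptions: 192.0.2.1:80 is a placeholder peer, the 500-byte value comes from the changelog, and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr_in sa;
        int mss = 500;                  /* the value used in the changelog */
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;

        /* Must be set before connect() to influence the SYN's MSS option. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0)
                perror("setsockopt(TCP_MAXSEG)");

        memset(&sa, 0, sizeof(sa));
        sa.sin_family = AF_INET;
        sa.sin_port = htons(80);                        /* placeholder port */
        inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);  /* placeholder host */

        if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0) {
                socklen_t len = sizeof(mss);

                /* Negotiated MSS for this connection; with the fix the
                 * peer was told 500 too, so its segments are capped. */
                getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len);
                printf("effective MSS: %d\n", mss);
        }

        close(fd);
        return 0;
}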