From: Greg Kroah-Hartman
Date: Tue, 30 Jun 2009 00:24:56 +0000 (-0700)
Subject: more .27 patches
X-Git-Tag: v2.6.27.26~19
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7064e2468ee7b1d95bbd8021b0cab38250add1d3;p=thirdparty%2Fkernel%2Fstable-queue.git

more .27 patches
---

diff --git a/queue-2.6.27/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch b/queue-2.6.27/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
new file mode 100644
index 00000000000..bff2813ce59
--- /dev/null
+++ b/queue-2.6.27/md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
@@ -0,0 +1,35 @@
+From 7a3ab908948b6296ee7e81d42f7c176361c51975 Mon Sep 17 00:00:00 2001
+From: Dan Williams
+Date: Tue, 16 Jun 2009 16:00:33 -0700
+Subject: md/raid5: add missing call to schedule() after prepare_to_wait()
+
+From: Dan Williams
+
+commit 7a3ab908948b6296ee7e81d42f7c176361c51975 upstream.
+
+In the unlikely event that reshape progresses past the current request
+while it is waiting for a stripe we need to schedule() before retrying
+for 2 reasons:
+1/ Prevent list corruption from duplicated list_add() calls without
+   intervening list_del().
+2/ Give the reshape code a chance to make some progress to resolve the
+   conflict.
+
+Signed-off-by: Dan Williams
+Signed-off-by: NeilBrown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/raid5.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3431,6 +3431,7 @@ static int make_request(struct request_q
+ 		spin_unlock_irq(&conf->device_lock);
+ 		if (must_retry) {
+ 			release_stripe(sh);
++			schedule();
+ 			goto retry;
+ 		}
+ 	}
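
For context, the sketch below shows the wait-loop idiom that the schedule() call
above completes. prepare_to_wait() adds the task to the wait queue and marks it
TASK_UNINTERRUPTIBLE, but nothing actually sleeps until schedule() runs; looping
back without it repeats prepare_to_wait()'s list_add() with no intervening
list_del() and never lets the reshape thread run, exactly the two problems the
commit message names. This is a generic illustration, not the raid5 code: the
wait queue example_wq and the cond() callback are hypothetical.

/*
 * Generic wait-loop idiom (hypothetical names, not the raid5 code).
 * prepare_to_wait() only queues the task; schedule() does the sleeping.
 */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* hypothetical wait queue */

static void wait_for(int (*cond)(void))
{
	DEFINE_WAIT(w);

	for (;;) {
		prepare_to_wait(&example_wq, &w, TASK_UNINTERRUPTIBLE);
		if (cond())
			break;
		schedule();	/* the call the raid5 retry path was missing */
	}
	finish_wait(&example_wq, &w);	/* list_del() and back to TASK_RUNNING */
}

In the hunk above, "goto retry" loops back to the function's own
prepare_to_wait(), so the added schedule() plays the role of the one inside
this loop.
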
diff --git a/queue-2.6.27/series b/queue-2.6.27/series
index 854973987a5..d2cd4c967cf 100644
--- a/queue-2.6.27/series
+++ b/queue-2.6.27/series
@@ -17,3 +17,5 @@ ib-mlx4-add-strong-ordering-to-local-inval-and-fast-reg-work-requests.patch
 x86-handle-initrd-that-extends-into-unusable-memory.patch
 lockdep-select-frame-pointers-on-x86.patch
 send_sigio_to_task-sanitize-the-usage-of-fown-signum.patch
+md-raid5-add-missing-call-to-schedule-after-prepare_to_wait.patch
+tcp-advertise-mss-requested-by-user.patch
diff --git a/queue-2.6.27/tcp-advertise-mss-requested-by-user.patch b/queue-2.6.27/tcp-advertise-mss-requested-by-user.patch
new file mode 100644
index 00000000000..6101b3997e6
--- /dev/null
+++ b/queue-2.6.27/tcp-advertise-mss-requested-by-user.patch
@@ -0,0 +1,103 @@
+From f5fff5dc8a7a3f395b0525c02ba92c95d42b7390 Mon Sep 17 00:00:00 2001
+From: Tom Quetchenbach
+Date: Sun, 21 Sep 2008 00:21:51 -0700
+Subject: tcp: advertise MSS requested by user
+
+From: Tom Quetchenbach
+
+commit f5fff5dc8a7a3f395b0525c02ba92c95d42b7390 upstream.
+
+I'm trying to use the TCP_MAXSEG option to setsockopt() to set the MSS
+for both sides of a bidirectional connection.
+
+man tcp says: "If this option is set before connection establishment, it
+also changes the MSS value announced to the other end in the initial
+packet."
+
+However, the kernel only uses the MTU/route cache to set the advertised
+MSS. That means if I set the MSS to, say, 500 before calling connect(),
+I will send at most 500-byte packets, but I will still receive 1500-byte
+packets in reply.
+
+This is a bug, either in the kernel or the documentation.
+
+This patch (applies to latest net-2.6) reduces the advertised value to
+that requested by the user as long as setsockopt() is called before
+connect() or accept(). This seems like the behavior that one would
+expect as well as that which is documented.
+
+I've tried to make sure that things that depend on the advertised MSS
+are set correctly.
+
+Signed-off-by: Tom Quetchenbach
+Signed-off-by: David S. Miller
+Cc: Willy Tarreau
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ net/ipv4/tcp_ipv4.c   |    4 ++++
+ net/ipv4/tcp_output.c |   13 ++++++++++---
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1364,6 +1364,10 @@ struct sock *tcp_v4_syn_recv_sock(struct
+ 	tcp_mtup_init(newsk);
+ 	tcp_sync_mss(newsk, dst_mtu(dst));
+ 	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
++	if (tcp_sk(sk)->rx_opt.user_mss &&
++	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
++		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
++
+ 	tcp_initialize_rcv_mss(newsk);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2252,6 +2252,7 @@ struct sk_buff *tcp_make_synack(struct s
+ 	struct sk_buff *skb;
+ 	struct tcp_md5sig_key *md5;
+ 	__u8 *md5_hash_location;
++	int mss;
+ 
+ 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+ 	if (skb == NULL)
+@@ -2262,13 +2263,17 @@ struct sk_buff *tcp_make_synack(struct s
+ 
+ 	skb->dst = dst_clone(dst);
+ 
++	mss = dst_metric(dst, RTAX_ADVMSS);
++	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
++		mss = tp->rx_opt.user_mss;
++
+ 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
+ 		__u8 rcv_wscale;
+ 		/* Set this up on the first call only */
+ 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+ 		/* tcp_full_space because it is guaranteed to be the first packet */
+ 		tcp_select_initial_window(tcp_full_space(sk),
+-			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
++			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+ 			&req->rcv_wnd,
+ 			&req->window_clamp,
+ 			ireq->wscale_ok,
+@@ -2283,8 +2288,7 @@ struct sk_buff *tcp_make_synack(struct s
+ 	else
+ #endif
+ 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
+-	tcp_header_size = tcp_synack_options(sk, req,
+-					     dst_metric(dst, RTAX_ADVMSS),
++	tcp_header_size = tcp_synack_options(sk, req, mss,
+ 					     skb, &opts, &md5) +
+ 			  sizeof(struct tcphdr);
+ 
+@@ -2353,6 +2357,9 @@ static void tcp_connect_init(struct sock
+ 	if (!tp->window_clamp)
+ 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
+ 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
++	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
++		tp->advmss = tp->rx_opt.user_mss;
++
+ 	tcp_initialize_rcv_mss(sk);
+ 
+ 	tcp_select_initial_window(tcp_full_space(sk),
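
To see the fixed behaviour from userspace, the sketch below sets TCP_MAXSEG
before connect(), the scenario from the commit message. The address 192.0.2.1,
port 80, and the value 500 are placeholders (500 echoes the example in the
message). With the patch applied, the kernel clamps the MSS advertised in the
SYN to the requested value, so the peer also sends segments no larger than
that; the same clamp is applied on the accept() side in tcp_v4_syn_recv_sock(),
which is why the patch touches both tcp_ipv4.c and tcp_output.c.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in peer;
	int mss = 500;			/* value used in the commit message */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	/* Must be set before connect() to change the MSS advertised in the SYN. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0)
		perror("setsockopt(TCP_MAXSEG)");

	memset(&peer, 0, sizeof(peer));
	peer.sin_family = AF_INET;
	peer.sin_port = htons(80);	/* placeholder port */
	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);	/* placeholder address */

	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		perror("connect");

	close(fd);
	return 0;
}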