--- /dev/null
+From 3928f630f0d9fab7a6a43d7b48718be8e32f3fe1 Mon Sep 17 00:00:00 2001
+From: Tilman Schmidt <tilman@imap.cc>
+Date: Wed, 15 Apr 2009 03:25:43 -0700
+Subject: bas_gigaset: correctly allocate USB interrupt transfer buffer
+
+
+[ Upstream commit 170ebf85160dd128e1c4206cc197cce7d1424705 ]
+
+Every USB transfer buffer has to be allocated individually by kmalloc.
+
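+A minimal sketch of the pattern the fix adopts (illustrative names, not the
+driver's actual code): the interrupt-in buffer becomes a pointer that gets
+its own kmalloc'ed allocation, presumably so the USB core can safely DMA-map
+it, instead of an array embedded in the larger state structure.
+
+	#include <linux/slab.h>
+
+	struct example_state {
+		unsigned char *int_in_buf;	/* was: unsigned char int_in_buf[3]; */
+	};
+
+	static int example_alloc(struct example_state *st, size_t msgsize)
+	{
+		/* every USB transfer buffer is kmalloc'ed individually */
+		st->int_in_buf = kmalloc(msgsize, GFP_KERNEL);
+		if (!st->int_in_buf)
+			return -ENOMEM;
+		return 0;
+	}
+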
+Impact: bugfix, no functional change
+
+Signed-off-by: Tilman Schmidt <tilman@imap.cc>
+Tested-by: Kolja Waschk <kawk@users.sourceforge.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/isdn/gigaset/bas-gigaset.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -46,6 +46,9 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode"
+ /* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
+ #define IF_WRITEBUF 264
+
++/* interrupt pipe message size according to ibid. ch. 2.2 */
++#define IP_MSGSIZE 3
++
+ /* Values for the Gigaset 307x */
+ #define USB_GIGA_VENDOR_ID 0x0681
+ #define USB_3070_PRODUCT_ID 0x0001
+@@ -110,7 +113,7 @@ struct bas_cardstate {
+ unsigned char *rcvbuf; /* AT reply receive buffer */
+
+ struct urb *urb_int_in; /* URB for interrupt pipe */
+- unsigned char int_in_buf[3];
++ unsigned char *int_in_buf;
+
+ spinlock_t lock; /* locks all following */
+ int basstate; /* bitmap (BS_*) */
+@@ -657,7 +660,7 @@ static void read_int_callback(struct urb
+ }
+
+ /* drop incomplete packets even if the missing bytes wouldn't matter */
+- if (unlikely(urb->actual_length < 3)) {
++ if (unlikely(urb->actual_length < IP_MSGSIZE)) {
+ dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
+ urb->actual_length);
+ goto resubmit;
+@@ -2127,6 +2130,7 @@ static void gigaset_reinitbcshw(struct b
+ static void gigaset_freecshw(struct cardstate *cs)
+ {
+ /* timers, URBs and rcvbuf are disposed of in disconnect */
++ kfree(cs->hw.bas->int_in_buf);
+ kfree(cs->hw.bas);
+ cs->hw.bas = NULL;
+ }
+@@ -2232,6 +2236,12 @@ static int gigaset_probe(struct usb_inte
+ }
+ hostif = interface->cur_altsetting;
+ }
++ ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
++ if (!ucs->int_in_buf) {
++ kfree(ucs);
++ pr_err("out of memory\n");
++ return 0;
++ }
+
+ /* Reject application specific interfaces
+ */
+@@ -2290,7 +2300,7 @@ static int gigaset_probe(struct usb_inte
+ usb_fill_int_urb(ucs->urb_int_in, udev,
+ usb_rcvintpipe(udev,
+ (endpoint->bEndpointAddress) & 0x0f),
+- ucs->int_in_buf, 3, read_int_callback, cs,
++ ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
+ endpoint->bInterval);
+ if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
+ dev_err(cs->dev, "could not submit interrupt URB: %s\n",
--- /dev/null
+From 308ac80d677f5044d9b3437864e5d89a92feb5b2 Mon Sep 17 00:00:00 2001
+From: Jay Vosburgh <fubar@us.ibm.com>
+Date: Sat, 4 Apr 2009 17:23:15 -0700
+Subject: bonding: Fix updating of speed/duplex changes
+
+
+[ Upstream commit 17d04500e2528217de5fe967599f98ee84348a9c ]
+
+ This patch corrects an omission from the following commit:
+
+commit f0c76d61779b153dbfb955db3f144c62d02173c2
+Author: Jay Vosburgh <fubar@us.ibm.com>
+Date: Wed Jul 2 18:21:58 2008 -0700
+
+ bonding: refactor mii monitor
+
+ The un-refactored code checked the link speed and duplex of
+every slave on every pass; the refactored code did not do so.
+
+ The 802.3ad and balance-alb/tlb modes utilize the speed and
+duplex information, and require it to be kept up to date. This patch
+adds a notifier check to perform the appropriate updating when the slave
+device speed changes.
+
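+As a rough sketch of the mechanism being hooked (generic illustration with
+hypothetical names, not the bonding driver's actual code): a netdevice
+notifier receives NETDEV_CHANGE when a device's link state changes, and in
+kernels of this vintage the callback's data pointer is the net_device itself.
+
+	#include <linux/netdevice.h>
+	#include <linux/notifier.h>
+
+	static int example_netdev_event(struct notifier_block *nb,
+					unsigned long event, void *ptr)
+	{
+		struct net_device *dev = ptr;
+
+		switch (event) {
+		case NETDEV_CHANGE:
+			/* re-read dev's speed/duplex and update any cached copy */
+			break;
+		}
+		return NOTIFY_DONE;
+	}
+
+	static struct notifier_block example_notifier = {
+		.notifier_call = example_netdev_event,
+	};
+
+	/* registered once, e.g.: register_netdevice_notifier(&example_notifier); */
+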
+Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/bonding/bond_main.c | 25 ++++++++++++++++++++-----
+ drivers/net/bonding/bonding.h | 6 ++++++
+ 2 files changed, 26 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/bonding/bonding.h
++++ b/drivers/net/bonding/bonding.h
+@@ -248,6 +248,12 @@ static inline struct bonding *bond_get_b
+ return (struct bonding *)slave->dev->master->priv;
+ }
+
++static inline bool bond_is_lb(const struct bonding *bond)
++{
++ return bond->params.mode == BOND_MODE_TLB
++ || bond->params.mode == BOND_MODE_ALB;
++}
++
+ #define BOND_FOM_NONE 0
+ #define BOND_FOM_ACTIVE 1
+ #define BOND_FOM_FOLLOW 2
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3536,11 +3536,26 @@ static int bond_slave_netdev_event(unsig
+ }
+ break;
+ case NETDEV_CHANGE:
+- /*
+- * TODO: is this what we get if somebody
+- * sets up a hierarchical bond, then rmmod's
+- * one of the slave bonding devices?
+- */
++ if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) {
++ struct slave *slave;
++
++ slave = bond_get_slave_by_dev(bond, slave_dev);
++ if (slave) {
++ u16 old_speed = slave->speed;
++ u16 old_duplex = slave->duplex;
++
++ bond_update_speed_duplex(slave);
++
++ if (bond_is_lb(bond))
++ break;
++
++ if (old_speed != slave->speed)
++ bond_3ad_adapter_speed_changed(slave);
++ if (old_duplex != slave->duplex)
++ bond_3ad_adapter_duplex_changed(slave);
++ }
++ }
++
+ break;
+ case NETDEV_DOWN:
+ /*
--- /dev/null
+From 8866e77e34c013658b231ba3690250c4db05e38d Mon Sep 17 00:00:00 2001
+From: Stephen Hemminger <shemminger@vyatta.com>
+Date: Wed, 25 Mar 2009 21:01:47 -0700
+Subject: bridge: bad error handling when adding invalid ether address
+
+
+[ Upstream commit cda6d377ec6b2ee2e58d563d0bd7eb313e0165df ]
+
+This fixes a crash when an empty bond device is added to a bridge.
+If an interface with an invalid ethernet address (all zero) is added
+to a bridge, the bridge code detects it when setting up the forwarding
+database entry. But the error unwind is broken: the bridge port object
+can get freed twice, once when the ref count went to zero, and once by kfree.
+Since the object is never really accessible, just free it.
+
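+A rough sketch of the double-free pattern described above (hypothetical
+names, not the bridge code itself): once a kobject's release handler frees
+the containing object, dropping the last reference already frees the memory,
+so a further explicit kfree hits the same allocation a second time.
+
+	#include <linux/kobject.h>
+	#include <linux/slab.h>
+
+	struct port_example {
+		struct kobject kobj;
+		/* ... */
+	};
+
+	/* wired up through the kobject's ktype as its .release method */
+	static void port_example_release(struct kobject *kobj)
+	{
+		kfree(container_of(kobj, struct port_example, kobj));
+	}
+
+	/* broken unwind:
+	 *	kobject_put(&p->kobj);	refcount hits zero -> release() kfrees p
+	 *	kfree(p);		frees the same memory again
+	 */
+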
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/bridge/br_if.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -426,7 +426,6 @@ err2:
+ err1:
+ kobject_del(&p->kobj);
+ err0:
+- kobject_put(&p->kobj);
+ dev_set_promiscuity(dev, -1);
+ put_back:
+ dev_put(dev);
--- /dev/null
+From 6feb7acb128c3ee225d1444fe5bca386b5bd0fa3 Mon Sep 17 00:00:00 2001
+From: Pavel Emelyanov <xemul@openvz.org>
+Date: Thu, 26 Feb 2009 03:35:13 -0800
+Subject: ipv6: don't use tw net when accounting for recycled tw
+
+
+[ Upstream commit 3f53a38131a4e7a053c0aa060aba0411242fb6b9 ]
+
+We already have a valid net in that place, but this is not just a
+cleanup - the tw pointer can sometimes be NULL there, causing
+an oops in the NET_NS=y case.
+
+The same place in the ipv4 code already works correctly, using the
+existing net rather than the tw's one.
+
+The bug exists since 2.6.27.
+
+Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv6/inet6_hashtables.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -210,11 +210,11 @@ unique:
+
+ if (twp != NULL) {
+ *twp = tw;
+- NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
++ NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+ } else if (tw != NULL) {
+ /* Silly. Should hash-dance instead... */
+ inet_twsk_deschedule(tw, death_row);
+- NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
++ NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+
+ inet_twsk_put(tw);
+ }
--- /dev/null
+From 5209180c240d396274624326a7ac86642db0b505 Mon Sep 17 00:00:00 2001
+From: Jesper Nilsson <jesper.nilsson@axis.com>
+Date: Fri, 27 Mar 2009 00:17:45 -0700
+Subject: ipv6: Plug sk_buff leak in ipv6_rcv (net/ipv6/ip6_input.c)
+
+
+[ Upstream commit 71f6f6dfdf7c7a67462386d9ea05c1095a89c555 ]
+
+Commit 778d80be52699596bf70e0eb0761cf5e1e46088d
+(ipv6: Add disable_ipv6 sysctl to disable IPv6 operation on specific interface)
+seems to have introduced a leak of sk_buff's for ipv6 traffic,
+at least in some configurations where idev is NULL, or when ipv6
+is disabled via sysctl.
+
+The problem is that when skb_share_check() in the first condition
+returns a non-NULL skb (holding its only reference) but one of the
+other conditions applies, execution jumps to the "out" label, which
+does not call kfree_skb for it.
+
+To plug this leak, change to use the "drop" label instead
+(this relies on it being ok to call kfree_skb on NULL).
+This also allows us to avoid calling rcu_read_unlock here,
+and removes the only user of the "out" label.
+
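+A minimal sketch of why sharing the "drop" label is safe (hypothetical
+function, not the actual ipv6_rcv code): kfree_skb() simply returns when
+handed a NULL pointer, so both the NULL and non-NULL failure cases can use
+the same exit path.
+
+	#include <linux/skbuff.h>
+
+	static int example_rcv(struct sk_buff *skb, int disabled)
+	{
+		skb = skb_share_check(skb, GFP_ATOMIC);
+		if (skb == NULL || disabled)
+			goto drop;	/* skb may be NULL here */
+		/* ... normal processing consumes skb ... */
+		return 0;
+	drop:
+		kfree_skb(skb);		/* safe: kfree_skb(NULL) is a no-op */
+		return 0;
+	}
+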
+Signed-off-by: Jesper Nilsson <jesper.nilsson@axis.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv6/ip6_input.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -75,8 +75,7 @@ int ipv6_rcv(struct sk_buff *skb, struct
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
+ !idev || unlikely(idev->cnf.disable_ipv6)) {
+ IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+- rcu_read_unlock();
+- goto out;
++ goto drop;
+ }
+
+ memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+@@ -147,7 +146,6 @@ err:
+ drop:
+ rcu_read_unlock();
+ kfree_skb(skb);
+-out:
+ return 0;
+ }
+
--- /dev/null
+From e248c24d0d83f40328b11cddee7cb4fd090ebcf4 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 18 Mar 2009 19:12:42 -0700
+Subject: net: fix sctp breakage
+
+
+[ Upstream commit cb0dc77de0d23615a845e45844a2e22fc224d7fe ]
+
+broken by commit 5e739d1752aca4e8f3e794d431503bfca3162df4; AFAICS should
+be -stable fodder as well...
+
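+For context, a rough sketch of the parameter layout being sized here
+(hypothetical struct name; the kernel type is sctp_paramhdr_t): an SCTP
+parameter's length field covers the header plus the value bytes, so with the
+two one-byte chunk identifiers appended the correct length is the header
+size plus two, computed in host order and converted to network order once.
+
+	#include <linux/types.h>
+
+	struct paramhdr_sketch {
+		__be16 type;
+		__be16 length;	/* header + value bytes, network byte order */
+	};
+
+	/* two chunk ids (ASCONF, ASCONF_ACK) follow the header:
+	 *	length = htons(sizeof(struct paramhdr_sketch) + 2);
+	 */
+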
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sctp/endpointola.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -111,7 +111,8 @@ static struct sctp_endpoint *sctp_endpoi
+ if (sctp_addip_enable) {
+ auth_chunks->chunks[0] = SCTP_CID_ASCONF;
+ auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
+- auth_chunks->param_hdr.length += htons(2);
++ auth_chunks->param_hdr.length =
++ htons(sizeof(sctp_paramhdr_t) + 2);
+ }
+ }
+
--- /dev/null
+From 0c6310c4d90558e72e410c3777e29acdec6477e2 Mon Sep 17 00:00:00 2001
+From: Mark H. Weaver <mhw@netris.org>
+Date: Mon, 23 Mar 2009 13:46:12 +0100
+Subject: netfilter: nf_conntrack_tcp: fix unaligned memory access in tcp_sack
+
+
+[ Upstream commit 534f81a5068799799e264fd162e9488a129f98d4 ]
+
+This patch fixes an unaligned memory access in tcp_sack while reading
+sequence numbers from TCP selective acknowledgement options. Prior to
+applying this patch, upstream linux-2.6.27.20 was occasionally
+generating messages like this on my sparc64 system:
+
+ [54678.532071] Kernel unaligned access at TPC[6b17d4] tcp_packet+0xcd4/0xd00
+
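+A minimal sketch of the access pattern involved (hypothetical helper, not
+the conntrack code itself): TCP option data is only byte-aligned, so a
+32-bit load through a cast pointer can fault on strict-alignment CPUs such
+as sparc64, whereas get_unaligned_be32() performs a safe byte-wise read.
+
+	#include <linux/types.h>
+	#include <asm/unaligned.h>
+
+	static u32 read_sack_edge(const unsigned char *ptr)
+	{
+		/* instead of: ntohl(*(__be32 *)ptr), which assumes 4-byte alignment */
+		return get_unaligned_be32(ptr);
+	}
+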
+Acked-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/netfilter/nf_conntrack_proto_tcp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -15,6 +15,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/ipv6.h>
+ #include <net/ip6_checksum.h>
++#include <asm/unaligned.h>
+
+ #include <net/tcp.h>
+
+@@ -466,7 +467,7 @@ static void tcp_sack(const struct sk_buf
+ for (i = 0;
+ i < (opsize - TCPOLEN_SACK_BASE);
+ i += TCPOLEN_SACK_PERBLOCK) {
+- tmp = ntohl(*((__be32 *)(ptr+i)+1));
++ tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
+
+ if (after(tmp, *sack))
+ *sack = tmp;
--- /dev/null
+bonding-fix-updating-of-speed-duplex-changes.patch
+net-fix-sctp-breakage.patch
+ipv6-don-t-use-tw-net-when-accounting-for-recycled-tw.patch
+ipv6-plug-sk_buff-leak-in-ipv6_rcv.patch
+netfilter-nf_conntrack_tcp-fix-unaligned-memory-access-in-tcp_sack.patch
+xfrm-spin_lock-should-be-spin_unlock-in-xfrm_state.c.patch
+bridge-bad-error-handling-when-adding-invalid-ether-address.patch
+bas_gigaset-correctly-allocate-usb-interrupt-transfer-buffer.patch
+tcp-fix-various-bugs-wrt.-packet-counting-during-fragmentation.patch
--- /dev/null
+From e42e8ccda949937872452f571638200f515090c2 Mon Sep 17 00:00:00 2001
+From: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Date: Sat, 4 Apr 2009 18:12:41 -0700
+Subject: tcp: Fix various bugs wrt. packet counting during fragmentation.
+
+
+[ Upstream commits: d3d2ae454501a4dec360995649e1b002a2ad90c5,
+ 02276f3c962fd408fa9d441251067845f948bfcf,
+ 797108d134a91afca9fa59c572336b279bc66afb,
+ 9eb9362e569062e2f841b7a023e5fcde10ed63b4 ]
+
+--------------------
+tcp: Don't clear hints when tcp_fragmenting
+
+1) We didn't remove any skbs, so no need to handle stale refs.
+
+2) scoreboard_skb_hint is trivial; no timestamps were changed,
+   so no need to clear that one.
+
+3) lost_skb_hint needs tweaking similar to that of
+ tcp_sacktag_one().
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+--------------------
+tcp: fix corner case issue in segmentation during rexmitting
+
+If cur_mss grew very recently so that the previously G/TSOed skb
+now fits well into a single segment, it would still get sent out in
+parts unless we calculate the # of segments again. This corner case
+could happen e.g. after an mtu probe completes or when fewer sack
+blocks than before are required for the opposite direction.
+
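+A small worked example of this corner case (illustrative numbers only):
+
+	/* Suppose an skb of 2800 bytes was segmented with the old mss of 1400,
+	 * so tcp_skb_pcount(skb) == 2.  If cur_mss has since grown to 2900,
+	 * the skb now fits in a single segment, yet the stale gso_size/gso_segs
+	 * would still make it go out as two segments.  Re-running
+	 * tcp_init_tso_segs(sk, skb, cur_mss) drops the pcount to 1, and the
+	 * difference (oldpcount - tcp_skb_pcount(skb) == 1) must then be
+	 * removed from packets_out and friends via tcp_adjust_pcount().
+	 */
+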
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+--------------------
+tcp: add helper for counter tweaking due mid-wq change
+
+We need full-scale adjustment to fix a TCP miscount in the next
+patch, so just move it into a helper and call it from the
+other places.
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+--------------------
+tcp: miscounts due to tcp_fragment pcount reset
+
+It seems that the trivial reset of pcount to one was not sufficient
+in tcp_retransmit_skb. Multiple counters experience a positive
+miscount when an skb's pcount gets lowered without the necessary
+adjustments (which counters exactly depends on the skb's sacked
+bits); at worst, a packets_out miscount can crash at RTO if the
+write queue is empty!
+
+Triggering this requires an mss change, so bidirectional tcp, an mtu
+probe, or the like.
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Reported-by: Markus Trippelsdorf <markus@trippelsdorf.de>
+Tested-by: Uwe Bugla <uwe.bugla@gmx.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/tcp.h | 15 -----------
+ net/ipv4/tcp_output.c | 68 ++++++++++++++++++++++++++++++--------------------
+ 2 files changed, 41 insertions(+), 42 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -610,21 +610,6 @@ static inline int tcp_skb_mss(const stru
+ return skb_shinfo(skb)->gso_size;
+ }
+
+-static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr)
+-{
+- if (*count) {
+- *count -= decr;
+- if ((int)*count < 0)
+- *count = 0;
+- }
+-}
+-
+-static inline void tcp_dec_pcount_approx(__u32 *count,
+- const struct sk_buff *skb)
+-{
+- tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
+-}
+-
+ /* Events passed to congestion control interface */
+ enum tcp_ca_event {
+ CA_EVENT_TX_START, /* first transmit when no packets in flight */
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -751,6 +751,36 @@ static void tcp_adjust_fackets_out(struc
+ tp->fackets_out -= decr;
+ }
+
++/* Pcount in the middle of the write queue got changed, we need to do various
++ * tweaks to fix counters
++ */
++static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ tp->packets_out -= decr;
++
++ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
++ tp->sacked_out -= decr;
++ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
++ tp->retrans_out -= decr;
++ if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
++ tp->lost_out -= decr;
++
++ /* Reno case is special. Sigh... */
++ if (tcp_is_reno(tp) && decr > 0)
++ tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
++
++ tcp_adjust_fackets_out(sk, skb, decr);
++
++ if (tp->lost_skb_hint &&
++ before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
++ (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked))
++ tp->lost_cnt_hint -= decr;
++
++ tcp_verify_left_out(tp);
++}
++
+ /* Function to create two new TCP segments. Shrinks the given segment
+ * to the specified size and appends a new segment with the rest of the
+ * packet to the list. This won't be called frequently, I hope.
+@@ -767,7 +797,6 @@ int tcp_fragment(struct sock *sk, struct
+
+ BUG_ON(len > skb->len);
+
+- tcp_clear_retrans_hints_partial(tp);
+ nsize = skb_headlen(skb) - len;
+ if (nsize < 0)
+ nsize = 0;
+@@ -834,22 +863,8 @@ int tcp_fragment(struct sock *sk, struct
+ int diff = old_factor - tcp_skb_pcount(skb) -
+ tcp_skb_pcount(buff);
+
+- tp->packets_out -= diff;
+-
+- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+- tp->sacked_out -= diff;
+- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
+- tp->retrans_out -= diff;
+-
+- if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
+- tp->lost_out -= diff;
+-
+- /* Adjust Reno SACK estimate. */
+- if (tcp_is_reno(tp) && diff > 0) {
+- tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
+- tcp_verify_left_out(tp);
+- }
+- tcp_adjust_fackets_out(sk, skb, diff);
++ if (diff)
++ tcp_adjust_pcount(sk, skb, diff);
+ }
+
+ /* Link BUFF into the send queue. */
+@@ -1829,22 +1844,14 @@ static void tcp_retrans_try_collapse(str
+ * packet counting does not break.
+ */
+ TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
+- if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_RETRANS)
+- tp->retrans_out -= tcp_skb_pcount(next_skb);
+- if (TCP_SKB_CB(next_skb)->sacked & TCPCB_LOST)
+- tp->lost_out -= tcp_skb_pcount(next_skb);
+- /* Reno case is special. Sigh... */
+- if (tcp_is_reno(tp) && tp->sacked_out)
+- tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
+-
+- tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
+- tp->packets_out -= tcp_skb_pcount(next_skb);
+
+ /* changed transmit queue under us so clear hints */
+ tcp_clear_retrans_hints_partial(tp);
+ if (next_skb == tp->retransmit_skb_hint)
+ tp->retransmit_skb_hint = skb;
+
++ tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
++
+ sk_wmem_free_skb(sk, next_skb);
+ }
+
+@@ -1945,6 +1952,13 @@ int tcp_retransmit_skb(struct sock *sk,
+ if (skb->len > cur_mss) {
+ if (tcp_fragment(sk, skb, cur_mss, cur_mss))
+ return -ENOMEM; /* We'll try again later. */
++ } else {
++ int oldpcount = tcp_skb_pcount(skb);
++
++ if (unlikely(oldpcount > 1)) {
++ tcp_init_tso_segs(sk, skb, cur_mss);
++ tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
++ }
+ }
+
+ /* Collapse two adjacent packets if worthwhile and we can. */
--- /dev/null
+From 957bc5be87d7dabedac8fc864f1a73a5c376014c Mon Sep 17 00:00:00 2001
+From: Chuck Ebbert <cebbert@redhat.com>
+Date: Fri, 27 Mar 2009 00:22:01 -0700
+Subject: xfrm: spin_lock() should be spin_unlock() in xfrm_state.c
+
+
+[ Upstream commit 7d0b591c655ca0d72ebcbd242cf659a20a8995c5 ]
+
+spin_lock() should be spin_unlock() in xfrm_state_walk_done().
+
+caused by:
+commit 12a169e7d8f4b1c95252d8b04ed0f1033ed7cfe2
+"ipsec: Put dumpers on the dump list"
+
+Reported-by: Marc Milgram <mmilgram@redhat.com>
+Signed-off-by: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+---
+ net/xfrm/xfrm_state.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1601,7 +1601,7 @@ void xfrm_state_walk_done(struct xfrm_st
+
+ spin_lock_bh(&xfrm_state_lock);
+ list_del(&walk->all);
+- spin_lock_bh(&xfrm_state_lock);
++ spin_unlock_bh(&xfrm_state_lock);
+ }
+ EXPORT_SYMBOL(xfrm_state_walk_done);
+