git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
start .29 queue up
author    Greg Kroah-Hartman <gregkh@suse.de>    Wed, 27 May 2009 05:51:33 +0000 (22:51 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>    Wed, 27 May 2009 05:51:33 +0000 (22:51 -0700)
19 files changed:
queue-2.6.29/bonding-fix-alb-mode-locking-regression.patch [new file with mode: 0644]
queue-2.6.29/bonding-remove-debug-printk.patch [new file with mode: 0644]
queue-2.6.29/gianfar-fix-bug-under-load-after-introduction-of-skb-recycling.patch [new file with mode: 0644]
queue-2.6.29/mac8390-fix-regression-caused-during-net_device_ops-conversion.patch [new file with mode: 0644]
queue-2.6.29/myr10ge-again-fix-lro_gen_skb-alignment.patch [new file with mode: 0644]
queue-2.6.29/net-fix-length-computation-in-rt_check_expire.patch [new file with mode: 0644]
queue-2.6.29/net-fix-rtable-leak-in-net-ipv4-route.c.patch [new file with mode: 0644]
queue-2.6.29/net-fix-skb_seq_read-returning-wrong-offset-length-for-page-frag-data.patch [new file with mode: 0644]
queue-2.6.29/pktgen-do-not-access-flows-beyond-its-length.patch [new file with mode: 0644]
queue-2.6.29/r8169-avoid-losing-msi-interrupts.patch [new file with mode: 0644]
queue-2.6.29/revert-rose-zero-length-frame-filtering-in-af_rose.c.patch [new file with mode: 0644]
queue-2.6.29/sch_teql-should-not-dereference-skb-after-ndo_start_xmit.patch [new file with mode: 0644]
queue-2.6.29/series [new file with mode: 0644]
queue-2.6.29/sparc-fix-bus-type-probing-for-esp-and-le-devices.patch [new file with mode: 0644]
queue-2.6.29/sparc64-fix-smp_callin-locking.patch [new file with mode: 0644]
queue-2.6.29/tcp-fix-2-iw-selection.patch [new file with mode: 0644]
queue-2.6.29/tcp-fix-msg_peek-race-check.patch [new file with mode: 0644]
queue-2.6.29/vlan-macvlan-fix-null-pointer-dereferences-in-ethtool-handlers.patch [new file with mode: 0644]
queue-2.6.29/xfrm-wrong-hash-value-for-temporary-sa.patch [new file with mode: 0644]

diff --git a/queue-2.6.29/bonding-fix-alb-mode-locking-regression.patch b/queue-2.6.29/bonding-fix-alb-mode-locking-regression.patch
new file mode 100644 (file)
index 0000000..3c1a5f5
--- /dev/null
@@ -0,0 +1,55 @@
+From 8d3d3afc72016c6f1ca990f48ecab94e59eb6f38 Mon Sep 17 00:00:00 2001
+From: Jay Vosburgh <fubar@us.ibm.com>
+Date: Tue, 26 May 2009 15:29:00 -0700
+Subject: bonding: fix alb mode locking regression
+
+From: Jay Vosburgh <fubar@us.ibm.com>
+
+[ Upstream commit 815bcc2719c12b6f5b511706e2d19728e07f0b02 ]
+
+Fix locking issue in alb MAC address management; removed
+incorrect locking and replaced with correct locking.  This bug was
+introduced in commit 059fe7a578fba5bbb0fdc0365bfcf6218fa25eb0
+("bonding: Convert locks to _bh, rework alb locking for new locking")
+
+       Bug reported by Paul Smith <paul@mad-scientist.net>, who also
+tested the fix.
+
+Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/bonding/bond_alb.c |    8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1739,9 +1739,6 @@ int bond_alb_set_mac_address(struct net_
+               }
+       }
+-      write_unlock_bh(&bond->curr_slave_lock);
+-      read_unlock(&bond->lock);
+-
+       if (swap_slave) {
+               alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+               alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
+@@ -1749,16 +1746,15 @@ int bond_alb_set_mac_address(struct net_
+               alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
+                                      bond->alb_info.rlb_enabled);
++              read_lock(&bond->lock);
+               alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+               if (bond->alb_info.rlb_enabled) {
+                       /* inform clients mac address has changed */
+                       rlb_req_update_slave_clients(bond, bond->curr_active_slave);
+               }
++              read_unlock(&bond->lock);
+       }
+-      read_lock(&bond->lock);
+-      write_lock_bh(&bond->curr_slave_lock);
+-
+       return 0;
+ }
diff --git a/queue-2.6.29/bonding-remove-debug-printk.patch b/queue-2.6.29/bonding-remove-debug-printk.patch
new file mode 100644 (file)
index 0000000..17b572b
--- /dev/null
@@ -0,0 +1,37 @@
+From 3b72ab96a55df99eaf0e290982726f5666e28805 Mon Sep 17 00:00:00 2001
+From: Jay Vosburgh <fubar@us.ibm.com>
+Date: Tue, 14 Apr 2009 16:53:14 -0700
+Subject: bonding: Remove debug printk
+
+From: Jay Vosburgh <fubar@us.ibm.com>
+
+[ Upstream commit 2690f8d62e98779c71625dba9a0fd525d8b2263d ]
+
+       Remove the debug printk I accidentally left in as part of commit:
+
+commit 6146b1a4da98377e4abddc91ba5856bef8f23f1e
+Author: Jay Vosburgh <fubar@us.ibm.com>
+Date:   Tue Nov 4 17:51:15 2008 -0800
+
+    bonding: Fix ALB mode to balance traffic on VLANs
+
+       Reported by Duncan Gibb <duncan.gibb@siriusit.co.uk>
+
+Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/bonding/bond_alb.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *
+       if (arp->op_code == htons(ARPOP_REPLY)) {
+               /* update rx hash table for this ARP */
+-              printk("rar: update orig %s bond_dev %s\n", orig_dev->name,
+-                     bond_dev->name);
+               bond = netdev_priv(bond_dev);
+               rlb_update_entry_from_arp(bond, arp);
+               pr_debug("Server received an ARP Reply from client\n");
diff --git a/queue-2.6.29/gianfar-fix-bug-under-load-after-introduction-of-skb-recycling.patch b/queue-2.6.29/gianfar-fix-bug-under-load-after-introduction-of-skb-recycling.patch
new file mode 100644 (file)
index 0000000..8b3aec3
--- /dev/null
@@ -0,0 +1,54 @@
+From 1c2b64b6fe9c43ce6f81bb655ca6c4df1a06eb9e Mon Sep 17 00:00:00 2001
+From: Lennert Buytenhek <buytenh@wantstofly.org>
+Date: Tue, 26 May 2009 15:26:22 -0700
+Subject: gianfar: fix BUG under load after introduction of skb recycling
+
+From: Lennert Buytenhek <buytenh@wantstofly.org>
+
+[ Upstream commit 4e2fd555199977c5994d1a4d2d3b8761b20ca4c7 ]
+
+Since commit 0fd56bb5be6455d0d42241e65aed057244665e5e ("gianfar:
+Add support for skb recycling"), gianfar puts skbuffs that are in
+the rx ring back onto the recycle list as-is in case there was a
+receive error, but this breaks the following invariant: that all
+skbuffs on the recycle list have skb->data = skb->head + NET_SKB_PAD.
+
+The RXBUF_ALIGNMENT realignment done in gfar_new_skb() will be done
+twice on skbuffs recycled in this way, causing there not to be enough
+room in the skb anymore to receive a full packet, eventually leading
+to an skb_over_panic from gfar_clean_rx_ring() -> skb_put().
+
+Resetting the skb->data pointer to skb->head + NET_SKB_PAD before
+putting the skb back onto the recycle list restores the mentioned
+invariant, and should fix this issue.
+
+Reported-by: Michael Guntsche <mike@it-loops.com>
+Tested-by: Michael Guntsche <mike@it-loops.com>
+Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/gianfar.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/gianfar.c
++++ b/drivers/net/gianfar.c
+@@ -1822,8 +1822,17 @@ int gfar_clean_rx_ring(struct net_device
+                       if (unlikely(!newskb))
+                               newskb = skb;
+-                      else if (skb)
++                      else if (skb) {
++                              /*
++                               * We need to reset ->data to what it
++                               * was before gfar_new_skb() re-aligned
++                               * it to an RXBUF_ALIGNMENT boundary
++                               * before we put the skb back on the
++                               * recycle list.
++                               */
++                              skb->data = skb->head + NET_SKB_PAD;
+                               dev_kfree_skb_any(skb);
++                      }
+               } else {
+                       /* Increment the number of packets */
+                       dev->stats.rx_packets++;
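
The gianfar fix rests on one invariant: every buffer on the recycle list must have
skb->data equal to skb->head + NET_SKB_PAD, so gfar_new_skb() can apply its
RXBUF_ALIGNMENT shift exactly once. A small standalone C sketch of that invariant,
using invented stand-in types rather than the real struct sk_buff:

#include <assert.h>
#include <stdio.h>

#define HEADROOM 32		/* stand-in for NET_SKB_PAD */

struct fake_skb {
	unsigned char buf[2048];
	unsigned char *head;
	unsigned char *data;
};

/* Restore the free-list invariant before handing the buffer back. */
static void recycle(struct fake_skb *skb)
{
	skb->data = skb->head + HEADROOM;
}

int main(void)
{
	struct fake_skb skb;

	skb.head = skb.buf;
	skb.data = skb.head + HEADROOM;

	skb.data += 6;			/* pretend an RXBUF_ALIGNMENT shift happened */
	recycle(&skb);

	assert(skb.data == skb.head + HEADROOM);
	printf("recycle invariant holds\n");
	return 0;
}
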
diff --git a/queue-2.6.29/mac8390-fix-regression-caused-during-net_device_ops-conversion.patch b/queue-2.6.29/mac8390-fix-regression-caused-during-net_device_ops-conversion.patch
new file mode 100644 (file)
index 0000000..f06c53c
--- /dev/null
@@ -0,0 +1,63 @@
+From fd63753a076d324bd18dfa51bc4906ca5ee6b53a Mon Sep 17 00:00:00 2001
+From: Finn Thain <fthain@telegraphics.com.au>
+Date: Mon, 25 May 2009 22:43:49 -0700
+Subject: mac8390: fix regression caused during net_device_ops conversion
+
+From: Finn Thain <fthain@telegraphics.com.au>
+
+[ Upstream commit 217cbfa856dc1cbc2890781626c4032d9e3ec59f ]
+
+Changeset ca17584bf2ad1b1e37a5c0e4386728cc5fc9dabc ("mac8390: update
+to net_device_ops") broke mac8390 by adding 8390.o to the link. That
+meant that lib8390.c was included twice, once in mac8390.c and once in
+8390.c, subject to different macros. This patch reverts that by
+avoiding the wrappers in 8390.c. They seem to be of no value since
+COMPAT_NET_DEV_OPS is going away soon.
+
+Tested with a Kinetics EtherPort card.
+
+Signed-off-by: Finn Thain <fthain@telegraphics.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/Makefile  |    2 +-
+ drivers/net/mac8390.c |   10 +++++-----
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/mac8390.c
++++ b/drivers/net/mac8390.c
+@@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe
+       if (!MACH_IS_MAC)
+               return ERR_PTR(-ENODEV);
+-      dev = alloc_ei_netdev();
++      dev = ____alloc_ei_netdev(0);
+       if (!dev)
+               return ERR_PTR(-ENOMEM);
+@@ -481,10 +481,10 @@ void cleanup_module(void)
+ static const struct net_device_ops mac8390_netdev_ops = {
+       .ndo_open               = mac8390_open,
+       .ndo_stop               = mac8390_close,
+-      .ndo_start_xmit         = ei_start_xmit,
+-      .ndo_tx_timeout         = ei_tx_timeout,
+-      .ndo_get_stats          = ei_get_stats,
+-      .ndo_set_multicast_list = ei_set_multicast_list,
++      .ndo_start_xmit         = __ei_start_xmit,
++      .ndo_tx_timeout         = __ei_tx_timeout,
++      .ndo_get_stats          = __ei_get_stats,
++      .ndo_set_multicast_list = __ei_set_multicast_list,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -100,7 +100,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
+ obj-$(CONFIG_NET) += Space.o loopback.o
+ obj-$(CONFIG_SEEQ8005) += seeq8005.o
+ obj-$(CONFIG_NET_SB1000) += sb1000.o
+-obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
++obj-$(CONFIG_MAC8390) += mac8390.o
+ obj-$(CONFIG_APNE) += apne.o 8390.o
+ obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
+ obj-$(CONFIG_HP100) += hp100.o
diff --git a/queue-2.6.29/myr10ge-again-fix-lro_gen_skb-alignment.patch b/queue-2.6.29/myr10ge-again-fix-lro_gen_skb-alignment.patch
new file mode 100644 (file)
index 0000000..8475bc2
--- /dev/null
@@ -0,0 +1,32 @@
+From a79718ed9f94efcb363de4198168969d27569c25 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Wed, 15 Apr 2009 02:26:49 -0700
+Subject: myr10ge: again fix lro_gen_skb() alignment
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+[ Upstream commit 636d2f68a0814d84de26c021b2c15e3b4ffa29de ]
+
+Add LRO alignment initially committed in
+621544eb8c3beaa859c75850f816dd9b056a00a3 ("[LRO]: fix lro_gen_skb()
+alignment") and removed in 0dcffac1a329be69bab0ac604bf7283737108e68
+("myri10ge: add multislices support") during conversion to
+multi-slice.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/myri10ge/myri10ge.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/myri10ge/myri10ge.c
++++ b/drivers/net/myri10ge/myri10ge.c
+@@ -2446,6 +2446,7 @@ static int myri10ge_open(struct net_devi
+               lro_mgr->lro_arr = ss->rx_done.lro_desc;
+               lro_mgr->get_frag_header = myri10ge_get_frag_header;
+               lro_mgr->max_aggr = myri10ge_lro_max_pkts;
++              lro_mgr->frag_align_pad = 2;
+               if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+                       lro_mgr->max_aggr = MAX_SKB_FRAGS;
diff --git a/queue-2.6.29/net-fix-length-computation-in-rt_check_expire.patch b/queue-2.6.29/net-fix-length-computation-in-rt_check_expire.patch
new file mode 100644 (file)
index 0000000..202c229
--- /dev/null
@@ -0,0 +1,53 @@
+From a99a7b580bcb2d04a5d971f73fef790945ad458f Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <dada1@cosmosbay.com>
+Date: Tue, 19 May 2009 18:54:22 +0000
+Subject: net: fix length computation in rt_check_expire()
+
+From: Eric Dumazet <dada1@cosmosbay.com>
+
+[ Upstream commit cf8da764fc6959b7efb482f375dfef9830e98205 ]
+
+rt_check_expire() computes average and standard deviation of chain lengths,
+but did not correctly reset length to 0 at the beginning of each chain.
+This probably gives overflows for sum2 (and sum) on loaded machines instead
+of meaningful results.
+
+Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/route.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -785,7 +785,7 @@ static void rt_check_expire(void)
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth, **rthp;
+-      unsigned long length = 0, samples = 0;
++      unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       u64 mult;
+@@ -795,9 +795,9 @@ static void rt_check_expire(void)
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+-      length = 0;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
++              unsigned long length;
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+@@ -809,6 +809,7 @@ static void rt_check_expire(void)
+               if (*rthp == NULL)
+                       continue;
++              length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = *rthp) != NULL) {
+                       if (rt_is_expired(rth)) {
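
The bug above is purely an accounting one: length was initialised once before the
outer loop instead of once per chain, so sum and sum2 keep growing across chains.
The sketch below models the corrected per-chain bookkeeping and the
average/standard-deviation arithmetic it feeds; names and sample data are invented,
and it is ordinary userspace C rather than kernel code (build with cc -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Pretend these are the entry counts found while walking each hash chain. */
	unsigned long chain_entries[] = { 3, 0, 7, 2, 5 };
	unsigned long sum = 0, sum2 = 0, samples = 0;

	for (size_t i = 0; i < sizeof(chain_entries) / sizeof(chain_entries[0]); i++) {
		unsigned long length = 0;	/* must restart at 0 for every chain */

		length += chain_entries[i];	/* stand-in for walking the chain */

		sum += length;
		sum2 += length * length;
		samples++;
	}

	if (samples) {
		double mean = (double)sum / samples;
		double var  = (double)sum2 / samples - mean * mean;
		printf("avg chain length %.2f, stddev %.2f\n", mean, sqrt(var));
	}
	return 0;
}
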
diff --git a/queue-2.6.29/net-fix-rtable-leak-in-net-ipv4-route.c.patch b/queue-2.6.29/net-fix-rtable-leak-in-net-ipv4-route.c.patch
new file mode 100644 (file)
index 0000000..7d258e8
--- /dev/null
@@ -0,0 +1,177 @@
+From 7c4c4ea7b1a79a55b4ed83c61151027f76078736 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <dada1@cosmosbay.com>
+Date: Tue, 19 May 2009 20:14:28 +0000
+Subject: net: fix rtable leak in net/ipv4/route.c
+
+From: Eric Dumazet <dada1@cosmosbay.com>
+
+[ Upstream commit 1ddbcb005c395518c2cd0df504cff3d4b5c85853 ]
+
+Alexander V. Lukyanov found a regression in 2.6.29 and made a complete
+analysis found in http://bugzilla.kernel.org/show_bug.cgi?id=13339
+Quoted here because it's a perfect one:
+
+begin_of_quotation
+ 2.6.29 patch has introduced flexible route cache rebuilding. Unfortunately the
+ patch has at least one critical flaw, and another problem.
+
+ rt_intern_hash calculates rthi pointer, which is later used for new entry
+ insertion. The same loop calculates cand pointer which is used to clean the
+ list. If the pointers are the same, rtable leak occurs, as first the cand is
+ removed then the new entry is appended to it.
+
+ This leak leads to unregister_netdevice problem (usage count > 0).
+
+ Another problem of the patch is that it tries to insert the entries in certain
+ order, to facilitate counting of entries distinct by all but QoS parameters.
+ Unfortunately, referencing an existing rtable entry moves it to list beginning,
+ to speed up further lookups, so the carefully built order is destroyed.
+
+ For the first problem the simplest patch it to set rthi=0 when rthi==cand, but
+ it will also destroy the ordering.
+end_of_quotation
+
+Problematic commit is 1080d709fb9d8cd4392f93476ee46a9d6ea05a5b
+(net: implement emergency route cache rebulds when gc_elasticity is exceeded)
+
+Trying to keep dst_entries ordered is too complex and breaks the fact that
+order should depend on the frequency of use for garbage collection.
+
+A possible fix is to make rt_intern_hash() simpler, and only make
+rt_check_expire() a little bit smarter, so that it can cope with an
+arbitrary entry order. The added loop runs on cache-hot data, while the
+cpu is prefetching the next object, so it should go unnoticed.
+
+Reported-and-analyzed-by: Alexander V. Lukyanov <lav@yar.ru>
+Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv4/route.c |   55 +++++++++++++++++--------------------------------------
+ 1 file changed, 17 insertions(+), 38 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -784,7 +784,7 @@ static void rt_check_expire(void)
+ {
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+-      struct rtable *rth, **rthp;
++      struct rtable *rth, *aux, **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       u64 mult;
+@@ -812,6 +812,7 @@ static void rt_check_expire(void)
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = *rthp) != NULL) {
++                      prefetch(rth->u.dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->u.dst.rt_next;
+                               rt_free(rth);
+@@ -820,33 +821,30 @@ static void rt_check_expire(void)
+                       if (rth->u.dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->u.dst.expires)) {
++nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->u.dst.rt_next;
+                                       /*
+-                                       * Only bump our length if the hash
+-                                       * inputs on entries n and n+1 are not
+-                                       * the same, we only count entries on
++                                       * We only count entries on
+                                        * a chain with equal hash inputs once
+                                        * so that entries for different QOS
+                                        * levels, and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation
+                                        */
+-                                      if ((*rthp == NULL) ||
+-                                          !compare_hash_inputs(&(*rthp)->fl,
+-                                                               &rth->fl))
+-                                              length += ONE;
++                                      for (aux = rt_hash_table[i].chain;;) {
++                                              if (aux == rth) {
++                                                      length += ONE;
++                                                      break;
++                                              }
++                                              if (compare_hash_inputs(&aux->fl, &rth->fl))
++                                                      break;
++                                              aux = aux->u.dst.rt_next;
++                                      }
+                                       continue;
+                               }
+-                      } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
+-                              tmo >>= 1;
+-                              rthp = &rth->u.dst.rt_next;
+-                              if ((*rthp == NULL) ||
+-                                  !compare_hash_inputs(&(*rthp)->fl,
+-                                                       &rth->fl))
+-                                      length += ONE;
+-                              continue;
+-                      }
++                      } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
++                              goto nofree;
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->u.dst.rt_next;
+@@ -1069,7 +1067,6 @@ out:     return 0;
+ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
+ {
+       struct rtable   *rth, **rthp;
+-      struct rtable   *rthi;
+       unsigned long   now;
+       struct rtable *cand, **candp;
+       u32             min_score;
+@@ -1089,7 +1086,6 @@ restart:
+       }
+       rthp = &rt_hash_table[hash].chain;
+-      rthi = NULL;
+       spin_lock_bh(rt_hash_lock_addr(hash));
+       while ((rth = *rthp) != NULL) {
+@@ -1135,17 +1131,6 @@ restart:
+               chain_length++;
+               rthp = &rth->u.dst.rt_next;
+-
+-              /*
+-               * check to see if the next entry in the chain
+-               * contains the same hash input values as rt.  If it does
+-               * This is where we will insert into the list, instead of
+-               * at the head.  This groups entries that differ by aspects not
+-               * relvant to the hash function together, which we use to adjust
+-               * our chain length
+-               */
+-              if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
+-                      rthi = rth;
+       }
+       if (cand) {
+@@ -1206,10 +1191,7 @@ restart:
+               }
+       }
+-      if (rthi)
+-              rt->u.dst.rt_next = rthi->u.dst.rt_next;
+-      else
+-              rt->u.dst.rt_next = rt_hash_table[hash].chain;
++      rt->u.dst.rt_next = rt_hash_table[hash].chain;
+ #if RT_CACHE_DEBUG >= 2
+       if (rt->u.dst.rt_next) {
+@@ -1225,10 +1207,7 @@ restart:
+        * previous writes to rt are comitted to memory
+        * before making rt visible to other CPUS.
+        */
+-      if (rthi)
+-              rcu_assign_pointer(rthi->u.dst.rt_next, rt);
+-      else
+-              rcu_assign_pointer(rt_hash_table[hash].chain, rt);
++      rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+       spin_unlock_bh(rt_hash_lock_addr(hash));
+       *rp = rt;
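
With the ordered-insertion scheme gone, the new aux loop counts an entry toward the
chain length only if no earlier entry in the same chain has equal hash inputs. Here
is a self-contained model of that "count each key once by scanning the prefix" idea,
with a plain integer key standing in for compare_hash_inputs() and invented types
throughout:

#include <stdio.h>

struct entry {
	int key;			/* stands in for the flow/hash inputs */
	struct entry *next;
};

/* Count each distinct key once per chain, wherever its entries sit. */
static unsigned long chain_length(struct entry *head)
{
	unsigned long length = 0;
	struct entry *e, *aux;

	for (e = head; e; e = e->next) {
		/* Scan from the head: if we reach e before finding an equal
		 * key, e is the first of its kind and gets counted. */
		for (aux = head; ; aux = aux->next) {
			if (aux == e) {
				length++;
				break;
			}
			if (aux->key == e->key)
				break;	/* an earlier entry already counted this key */
		}
	}
	return length;
}

int main(void)
{
	struct entry c = { .key = 1, .next = NULL };
	struct entry b = { .key = 2, .next = &c };
	struct entry a = { .key = 1, .next = &b };	/* same key as c */

	printf("effective length: %lu\n", chain_length(&a));	/* prints 2 */
	return 0;
}
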
diff --git a/queue-2.6.29/net-fix-skb_seq_read-returning-wrong-offset-length-for-page-frag-data.patch b/queue-2.6.29/net-fix-skb_seq_read-returning-wrong-offset-length-for-page-frag-data.patch
new file mode 100644 (file)
index 0000000..685edf3
--- /dev/null
@@ -0,0 +1,33 @@
+From 38c99a9ef2af39226b875ad264f55e8e427daf80 Mon Sep 17 00:00:00 2001
+From: Thomas Chenault <thomas_chenault@dell.com>
+Date: Mon, 18 May 2009 21:43:27 -0700
+Subject: net: fix skb_seq_read returning wrong offset/length for page frag data
+
+From: Thomas Chenault <thomas_chenault@dell.com>
+
+[ Upstream commit 995b337952cdf7e05d288eede580257b632a8343 ]
+
+When called with a consumed value that is less than skb_headlen(skb)
+bytes into a page frag, skb_seq_read() incorrectly returns an
+offset/length relative to skb->data. Ensure that data which should come
+from a page frag does.
+
+Signed-off-by: Thomas Chenault <thomas_chenault@dell.com>
+Tested-by: Shyam Iyer <shyam_iyer@dell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/core/skbuff.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2206,7 +2206,7 @@ unsigned int skb_seq_read(unsigned int c
+ next_skb:
+       block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
+-      if (abs_offset < block_limit) {
++      if (abs_offset < block_limit && !st->frag_data) {
+               *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
+               return block_limit - abs_offset;
+       }
diff --git a/queue-2.6.29/pktgen-do-not-access-flows-beyond-its-length.patch b/queue-2.6.29/pktgen-do-not-access-flows-beyond-its-length.patch
new file mode 100644 (file)
index 0000000..4542d12
--- /dev/null
@@ -0,0 +1,31 @@
+From e3a271af8cef0c6fa9251e949c872d4011589386 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Thu, 21 May 2009 15:07:12 -0700
+Subject: pktgen: do not access flows[] beyond its length
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 5b5f792a6a9a2f9ae812d151ed621f72e99b1725 ]
+
+typo -- pkt_dev->nflows is for stats only, the number of concurrent
+flows is stored in cflows.
+
+Reported-By: Vladimir Ivashchenko <hazard@francoudi.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/core/pktgen.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktge
+       if (pkt_dev->cflows) {
+               /* let go of the SAs if we have them */
+               int i = 0;
+-              for (;  i < pkt_dev->nflows; i++){
++              for (;  i < pkt_dev->cflows; i++) {
+                       struct xfrm_state *x = pkt_dev->flows[i].x;
+                       if (x) {
+                               xfrm_state_put(x);
diff --git a/queue-2.6.29/r8169-avoid-losing-msi-interrupts.patch b/queue-2.6.29/r8169-avoid-losing-msi-interrupts.patch
new file mode 100644 (file)
index 0000000..5ec6894
--- /dev/null
@@ -0,0 +1,159 @@
+From 5b1a878a50c43133375b5acec474bd528ef24a2e Mon Sep 17 00:00:00 2001
+From: Michael Buesch <mb@bu3sch.de>
+Date: Fri, 22 May 2009 23:24:28 +0000
+Subject: r8169: avoid losing MSI interrupts
+
+From: Michael Buesch <mb@bu3sch.de>
+
+[ Upstream commit f11a377b3f4e897d11f0e8d1fc688667e2f19708 ]
+
+The 8169 chip only generates MSI interrupts when all enabled event
+sources are quiescent and one or more sources transition to active. If
+not all of the active events are acknowledged, or a new event becomes
+active while the existing ones are cleared in the handler, we will not
+see a new interrupt.
+
+The current interrupt handler masks off the Rx and Tx events once the
+NAPI handler has been scheduled, which opens a race window in which we
+can get another Rx or Tx event and never ACK it, stopping all
+activity until the link is reset (ifconfig down/up). Fix this by always
+ACK'ing all event sources, and loop in the handler until we have all
+sources quiescent.
+
+Signed-off-by: David Dillow <dave@thedillows.org>
+Tested-by: Michael Buesch <mb@bu3sch.de>
+Tested-by: Michael Riepe <michael.riepe@googlemail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/r8169.c |  102 +++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 57 insertions(+), 45 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int
+       int handled = 0;
+       int status;
++      /* loop handling interrupts until we have no new ones or
++       * we hit a invalid/hotplug case.
++       */
+       status = RTL_R16(IntrStatus);
++      while (status && status != 0xffff) {
++              handled = 1;
+-      /* hotplug/major error/no more work/shared irq */
+-      if ((status == 0xffff) || !status)
+-              goto out;
+-
+-      handled = 1;
++              /* Handle all of the error cases first. These will reset
++               * the chip, so just exit the loop.
++               */
++              if (unlikely(!netif_running(dev))) {
++                      rtl8169_asic_down(ioaddr);
++                      break;
++              }
+-      if (unlikely(!netif_running(dev))) {
+-              rtl8169_asic_down(ioaddr);
+-              goto out;
+-      }
++              /* Work around for rx fifo overflow */
++              if (unlikely(status & RxFIFOOver) &&
++              (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
++                      netif_stop_queue(dev);
++                      rtl8169_tx_timeout(dev);
++                      break;
++              }
+-      status &= tp->intr_mask;
+-      RTL_W16(IntrStatus,
+-              (status & RxFIFOOver) ? (status | RxOverflow) : status);
++              if (unlikely(status & SYSErr)) {
++                      rtl8169_pcierr_interrupt(dev);
++                      break;
++              }
+-      if (!(status & tp->intr_event))
+-              goto out;
++              if (status & LinkChg)
++                      rtl8169_check_link_status(dev, tp, ioaddr);
+-      /* Work around for rx fifo overflow */
+-      if (unlikely(status & RxFIFOOver) &&
+-          (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+-              netif_stop_queue(dev);
+-              rtl8169_tx_timeout(dev);
+-              goto out;
+-      }
++              /* We need to see the lastest version of tp->intr_mask to
++               * avoid ignoring an MSI interrupt and having to wait for
++               * another event which may never come.
++               */
++              smp_rmb();
++              if (status & tp->intr_mask & tp->napi_event) {
++                      RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
++                      tp->intr_mask = ~tp->napi_event;
++
++                      if (likely(netif_rx_schedule_prep(&tp->napi)))
++                              __netif_rx_schedule(&tp->napi);
++                      else if (netif_msg_intr(tp)) {
++                              printk(KERN_INFO "%s: interrupt %04x in poll\n",
++                              dev->name, status);
++                      }
++              }
+-      if (unlikely(status & SYSErr)) {
+-              rtl8169_pcierr_interrupt(dev);
+-              goto out;
++              /* We only get a new MSI interrupt when all active irq
++               * sources on the chip have been acknowledged. So, ack
++               * everything we've seen and check if new sources have become
++               * active to avoid blocking all interrupts from the chip.
++               */
++              RTL_W16(IntrStatus,
++                      (status & RxFIFOOver) ? (status | RxOverflow) : status);
++              status = RTL_R16(IntrStatus);
+       }
+-      if (status & LinkChg)
+-              rtl8169_check_link_status(dev, tp, ioaddr);
+-
+-      if (status & tp->napi_event) {
+-              RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+-              tp->intr_mask = ~tp->napi_event;
+-
+-              if (likely(netif_rx_schedule_prep(&tp->napi)))
+-                      __netif_rx_schedule(&tp->napi);
+-              else if (netif_msg_intr(tp)) {
+-                      printk(KERN_INFO "%s: interrupt %04x in poll\n",
+-                             dev->name, status);
+-              }
+-      }
+-out:
+       return IRQ_RETVAL(handled);
+ }
+@@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_stru
+       if (work_done < budget) {
+               netif_rx_complete(napi);
+-              tp->intr_mask = 0xffff;
+-              /*
+-               * 20040426: the barrier is not strictly required but the
+-               * behavior of the irq handler could be less predictable
+-               * without it. Btw, the lack of flush for the posted pci
+-               * write is safe - FR
++
++              /* We need for force the visibility of tp->intr_mask
++               * for other CPUs, as we can loose an MSI interrupt
++               * and potentially wait for a retransmit timeout if we don't.
++               * The posted write to IntrMask is safe, as it will
++               * eventually make it to the chip and we won't loose anything
++               * until it does.
+                */
++              tp->intr_mask = 0xffff;
+               smp_wmb();
+               RTL_W16(IntrMask, tp->intr_event);
+       }
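
Because MSI behaves as edge-triggered, the reworked handler acknowledges every source
it has seen and then re-reads IntrStatus, looping until the register is quiescent.
The toy userspace model below shows why the re-read prevents a lost event; the
register, the simulated race and every name are made up for illustration:

#include <stdint.h>
#include <stdio.h>

static uint16_t hw_status;			/* pretend interrupt status register */

static uint16_t read_status(void)  { return hw_status; }
static void ack_status(uint16_t v) { hw_status &= (uint16_t)~v; }

static void fake_irq_handler(void)
{
	uint16_t status = read_status();
	int pass = 0;

	while (status && status != 0xffff) {	/* 0xffff would mean "device gone" */
		/* ... dispatch rx/tx/link work for the bits in status ... */

		/* Simulate an event arriving while the handler is still running. */
		if (pass++ == 0)
			hw_status |= 0x0004;

		ack_status(status);		/* ack only what we have actually seen */
		status = read_status();		/* pick up anything that raced in */
	}
}

int main(void)
{
	hw_status = 0x0003;			/* two sources pending at entry */
	fake_irq_handler();
	printf("final status: 0x%04x\n", (unsigned)hw_status);	/* 0x0000: nothing lost */
	return 0;
}
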
diff --git a/queue-2.6.29/revert-rose-zero-length-frame-filtering-in-af_rose.c.patch b/queue-2.6.29/revert-rose-zero-length-frame-filtering-in-af_rose.c.patch
new file mode 100644 (file)
index 0000000..4734a6d
--- /dev/null
@@ -0,0 +1,49 @@
+From f0439793847777a72abe3aeeb40947e99c347799 Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Tue, 14 Apr 2009 20:28:00 -0700
+Subject: Revert "rose: zero length frame filtering in af_rose.c"
+
+From: David S. Miller <davem@davemloft.net>
+
+[ Upstream commit 6fd4777a1fec1f7757b5a302ad3fdcc1eae2abba ]
+
+This reverts commit 244f46ae6e9e18f6fc0be7d1f49febde4762c34b.
+
+Alan Cox did the research, and just like the other radio protocols
+zero-length frames have meaning because at the top level ROSE is
+X.25 PLP.
+
+So this zero-length filtering is invalid.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/rose/af_rose.c |   10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *io
+       unsigned char *asmptr;
+       int n, size, qbit = 0;
+-      /* ROSE empty frame has no meaning : don't send */
+-      if (len == 0)
+-              return 0;
+-
+       if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
+               return -EINVAL;
+@@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *io
+       skb_reset_transport_header(skb);
+       copied     = skb->len;
+-      /* ROSE empty frame has no meaning : ignore it */
+-      if (copied == 0) {
+-              skb_free_datagram(sk, skb);
+-              return copied;
+-      }
+-
+       if (copied > size) {
+               copied = size;
+               msg->msg_flags |= MSG_TRUNC;
diff --git a/queue-2.6.29/sch_teql-should-not-dereference-skb-after-ndo_start_xmit.patch b/queue-2.6.29/sch_teql-should-not-dereference-skb-after-ndo_start_xmit.patch
new file mode 100644 (file)
index 0000000..06a1389
--- /dev/null
@@ -0,0 +1,44 @@
+From 92ffbfee6be8d68e60071b9e07572f6fbc973660 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <dada1@cosmosbay.com>
+Date: Mon, 18 May 2009 15:12:31 -0700
+Subject: sch_teql: should not dereference skb after ndo_start_xmit()
+
+From: Eric Dumazet <dada1@cosmosbay.com>
+
+[ Upstream commit c0f84d0d4be3f7d818b4ffb04d27f9bae64397f0 ]
+
+It is illegal to dereference a skb after a successful ndo_start_xmit()
+call. We must store skb length in a local variable instead.
+
+Bug was introduced in 2.6.27 by commit 0abf77e55a2459aa9905be4b226e4729d5b4f0cb
+(net_sched: Add accessor function for packet length for qdiscs)
+
+Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sched/sch_teql.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -303,6 +303,8 @@ restart:
+               switch (teql_resolve(skb, skb_res, slave)) {
+               case 0:
+                       if (__netif_tx_trylock(slave_txq)) {
++                              unsigned int length = qdisc_pkt_len(skb);
++
+                               if (!netif_tx_queue_stopped(slave_txq) &&
+                                   !netif_tx_queue_frozen(slave_txq) &&
+                                   slave_ops->ndo_start_xmit(skb, slave) == 0) {
+@@ -310,8 +312,7 @@ restart:
+                                       master->slaves = NEXT_SLAVE(q);
+                                       netif_wake_queue(dev);
+                                       master->stats.tx_packets++;
+-                                      master->stats.tx_bytes +=
+-                                              qdisc_pkt_len(skb);
++                                      master->stats.tx_bytes += length;
+                                       return 0;
+                               }
+                               __netif_tx_unlock(slave_txq);
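
The rule this patch enforces is general: a successful ndo_start_xmit() hands
ownership of the skb to the driver, so anything still needed afterwards (here its
length) must be copied into a local first. A minimal userspace illustration with a
fake skb and a fake transmit hook, not kernel code:

#include <stdio.h>
#include <stdlib.h>

struct fake_skb { unsigned int len; };

/* A successful transmit consumes (frees) the buffer, like ndo_start_xmit(). */
static int fake_start_xmit(struct fake_skb *skb)
{
	free(skb);
	return 0;
}

int main(void)
{
	struct fake_skb *skb = malloc(sizeof(*skb));
	unsigned long tx_bytes = 0;

	if (!skb)
		return 1;
	skb->len = 1500;

	unsigned int length = skb->len;		/* read it BEFORE the handoff */
	if (fake_start_xmit(skb) == 0)
		tx_bytes += length;		/* skb may be gone; use the copy */

	printf("tx_bytes = %lu\n", tx_bytes);
	return 0;
}
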
diff --git a/queue-2.6.29/series b/queue-2.6.29/series
new file mode 100644 (file)
index 0000000..b244b1c
--- /dev/null
@@ -0,0 +1,18 @@
+xfrm-wrong-hash-value-for-temporary-sa.patch
+tcp-fix-msg_peek-race-check.patch
+tcp-fix-2-iw-selection.patch
+net-fix-skb_seq_read-returning-wrong-offset-length-for-page-frag-data.patch
+sch_teql-should-not-dereference-skb-after-ndo_start_xmit.patch
+net-fix-length-computation-in-rt_check_expire.patch
+net-fix-rtable-leak-in-net-ipv4-route.c.patch
+revert-rose-zero-length-frame-filtering-in-af_rose.c.patch
+pktgen-do-not-access-flows-beyond-its-length.patch
+myr10ge-again-fix-lro_gen_skb-alignment.patch
+vlan-macvlan-fix-null-pointer-dereferences-in-ethtool-handlers.patch
+mac8390-fix-regression-caused-during-net_device_ops-conversion.patch
+gianfar-fix-bug-under-load-after-introduction-of-skb-recycling.patch
+bonding-fix-alb-mode-locking-regression.patch
+bonding-remove-debug-printk.patch
+r8169-avoid-losing-msi-interrupts.patch
+sparc-fix-bus-type-probing-for-esp-and-le-devices.patch
+sparc64-fix-smp_callin-locking.patch
diff --git a/queue-2.6.29/sparc-fix-bus-type-probing-for-esp-and-le-devices.patch b/queue-2.6.29/sparc-fix-bus-type-probing-for-esp-and-le-devices.patch
new file mode 100644 (file)
index 0000000..263a88e
--- /dev/null
@@ -0,0 +1,93 @@
+From 9ec78d3a870424bb8dd81b89a0ec22c032c67fa8 Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Fri, 17 Apr 2009 04:14:15 -0700
+Subject: sparc: Fix bus type probing for ESP and LE devices.
+
+From: David S. Miller <davem@davemloft.net>
+
+[ Upstream commit 956d039a2537cf79ca608450d36cc70e0e515482 ]
+
+If there is a dummy "espdma" or "ledma" parent device above ESP scsi
+or LE ethernet device nodes, we have to match the bus as SBUS.
+
+Otherwise the address and size cell counts are wrong and we don't
+calculate the final physical device resource values correctly at all.
+
+Commit 5280267c1dddb8d413595b87dc406624bb497946 ("sparc: Fix handling
+of LANCE and ESP parent nodes in of_device.c") was meant to fix this
+problem, but that only influences the inner loop of
+build_device_resources().  We need this logic to also kick in at the
+beginning of build_device_resources() as well, when we make the first
+attempt to determine the device's immediate parent bus type for 'reg'
+property element extraction.
+
+Based almost entirely upon a patch by Friedrich Oslage.
+
+Tested-by: Meelis Roos <mroos@linux.ee>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/sparc/kernel/of_device_32.c |   21 +++++++++++++++++++--
+ arch/sparc/kernel/of_device_64.c |   21 +++++++++++++++++++--
+ 2 files changed, 38 insertions(+), 4 deletions(-)
+
+--- a/arch/sparc/kernel/of_device_32.c
++++ b/arch/sparc/kernel/of_device_32.c
+@@ -246,8 +246,25 @@ static unsigned long of_bus_pci_get_flag
+ static int of_bus_sbus_match(struct device_node *np)
+ {
+-      return !strcmp(np->name, "sbus") ||
+-              !strcmp(np->name, "sbi");
++      struct device_node *dp = np;
++
++      while (dp) {
++              if (!strcmp(dp->name, "sbus") ||
++                  !strcmp(dp->name, "sbi"))
++                      return 1;
++
++              /* Have a look at use_1to1_mapping().  We're trying
++               * to match SBUS if that's the top-level bus and we
++               * don't have some intervening real bus that provides
++               * ranges based translations.
++               */
++              if (of_find_property(dp, "ranges", NULL) != NULL)
++                      break;
++
++              dp = dp->parent;
++      }
++
++      return 0;
+ }
+ static void of_bus_sbus_count_cells(struct device_node *child,
+--- a/arch/sparc/kernel/of_device_64.c
++++ b/arch/sparc/kernel/of_device_64.c
+@@ -301,8 +301,25 @@ static unsigned long of_bus_pci_get_flag
+ static int of_bus_sbus_match(struct device_node *np)
+ {
+-      return !strcmp(np->name, "sbus") ||
+-              !strcmp(np->name, "sbi");
++      struct device_node *dp = np;
++
++      while (dp) {
++              if (!strcmp(dp->name, "sbus") ||
++                  !strcmp(dp->name, "sbi"))
++                      return 1;
++
++              /* Have a look at use_1to1_mapping().  We're trying
++               * to match SBUS if that's the top-level bus and we
++               * don't have some intervening real bus that provides
++               * ranges based translations.
++               */
++              if (of_find_property(dp, "ranges", NULL) != NULL)
++                      break;
++
++              dp = dp->parent;
++      }
++
++      return 0;
+ }
+ static void of_bus_sbus_count_cells(struct device_node *child,
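
The new of_bus_sbus_match() walks up the parent chain: it matches if any ancestor is
named "sbus" or "sbi", but stops climbing at the first node that provides its own
"ranges" translations. A small userspace sketch of that walk, with an invented node
structure in place of struct device_node:

#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	int has_ranges;
	struct node *parent;
};

/* Match SBUS if any ancestor is "sbus"/"sbi", but stop at the first node
 * that provides its own "ranges" based translations. */
static int sbus_match(struct node *np)
{
	struct node *dp;

	for (dp = np; dp; dp = dp->parent) {
		if (!strcmp(dp->name, "sbus") || !strcmp(dp->name, "sbi"))
			return 1;
		if (dp->has_ranges)
			break;
	}
	return 0;
}

int main(void)
{
	struct node sbus = { "sbus",   0, NULL  };
	struct node dma  = { "espdma", 0, &sbus };	/* dummy parent node */
	struct node esp  = { "esp",    0, &dma  };

	printf("esp under espdma matches SBUS: %d\n", sbus_match(&esp));	/* 1 */
	return 0;
}
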
diff --git a/queue-2.6.29/sparc64-fix-smp_callin-locking.patch b/queue-2.6.29/sparc64-fix-smp_callin-locking.patch
new file mode 100644 (file)
index 0000000..9d63ec4
--- /dev/null
@@ -0,0 +1,34 @@
+From b0e83144208c22c402cdeabd8d15143aa6ee654b Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Wed, 8 Apr 2009 21:06:35 -0700
+Subject: sparc64: Fix smp_callin() locking.
+
+From: David S. Miller <davem@davemloft.net>
+
+[ Upstream commit 8e255baa449df3049a8827a7f1f4f12b6921d0d1 ]
+
+Interrupts must be disabled when taking the IPI lock.
+
+Caught by lockdep.
+
+Reported-by: Meelis Roos <mroos@linux.ee>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ arch/sparc/kernel/smp_64.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -118,9 +118,9 @@ void __cpuinit smp_callin(void)
+       while (!cpu_isset(cpuid, smp_commenced_mask))
+               rmb();
+-      ipi_call_lock();
++      ipi_call_lock_irq();
+       cpu_set(cpuid, cpu_online_map);
+-      ipi_call_unlock();
++      ipi_call_unlock_irq();
+       /* idle thread is expected to have preempt disabled */
+       preempt_disable();
diff --git a/queue-2.6.29/tcp-fix-2-iw-selection.patch b/queue-2.6.29/tcp-fix-2-iw-selection.patch
new file mode 100644 (file)
index 0000000..84f7c06
--- /dev/null
@@ -0,0 +1,39 @@
+From 163cd63891e0583d01a49eb7fca89266d8cad1b6 Mon Sep 17 00:00:00 2001
+From: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Date: Tue, 14 Apr 2009 02:08:53 -0700
+Subject: [PATCH 03/16] tcp: fix >2 iw selection
+
+From: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+
+[ Upstream commit 86bcebafc5e7f5163ccf828792fe694b112ed6fa ]
+
+A long-standing feature in tcp_init_metrics() is such that
+any of its goto reset prevents call to tcp_init_cwnd().
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/tcp_input.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -927,6 +927,8 @@ static void tcp_init_metrics(struct sock
+       tcp_set_rto(sk);
+       if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
+               goto reset;
++
++cwnd:
+       tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
+       return;
+@@ -941,6 +943,7 @@ reset:
+               tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
+               inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+       }
++      goto cwnd;
+ }
+ static void tcp_update_reordering(struct sock *sk, const int metric,
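
The change reroutes the reset branch back through a shared label so the initial-cwnd
setup runs on both paths. A tiny control-flow sketch of that shape, with printf calls
standing in for the real work and all names invented:

#include <stdio.h>

static void init_metrics(int need_reset)
{
	if (need_reset)
		goto reset;

cwnd:
	printf("set initial cwnd\n");		/* stands in for tcp_init_cwnd() */
	return;

reset:
	printf("reset srtt/mdev/rto\n");	/* stands in for the reset branch */
	goto cwnd;				/* the fix: rejoin the cwnd setup */
}

int main(void)
{
	init_metrics(0);
	init_metrics(1);
	return 0;
}
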
diff --git a/queue-2.6.29/tcp-fix-msg_peek-race-check.patch b/queue-2.6.29/tcp-fix-msg_peek-race-check.patch
new file mode 100644 (file)
index 0000000..721a9ff
--- /dev/null
@@ -0,0 +1,60 @@
+From 0e5ea9ad3898d14e4a750ee35569502e3cf1a58a Mon Sep 17 00:00:00 2001
+From: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Date: Sun, 10 May 2009 20:32:34 +0000
+Subject: tcp: fix MSG_PEEK race check
+
+From: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+
+[ Upstream commit 775273131810caa41dfc7f9e552ea5d8508caf40 ]
+
+Commit 518a09ef11 (tcp: Fix recvmsg MSG_PEEK influence of
+blocking behavior) lets the loop run longer than the race check
+did previously expect, so we need to be more careful with this
+check and consider the work we have been doing.
+
+I tried my best to deal with urg hole madness too which happens
+here:
+       if (!sock_flag(sk, SOCK_URGINLINE)) {
+               ++*seq;
+               ...
+by using additional offset by one but I certainly have very
+little interest in testing that part.
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Tested-by: Frans Pop <elendil@planet.nl>
+Tested-by: Ian Zimmermann <itz@buug.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/tcp.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1286,6 +1286,7 @@ int tcp_recvmsg(struct kiocb *iocb, stru
+       struct task_struct *user_recv = NULL;
+       int copied_early = 0;
+       struct sk_buff *skb;
++      u32 urg_hole = 0;
+       lock_sock(sk);
+@@ -1497,7 +1498,8 @@ do_prequeue:
+                               }
+                       }
+               }
+-              if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
++              if ((flags & MSG_PEEK) &&
++                  (peek_seq - copied - urg_hole != tp->copied_seq)) {
+                       if (net_ratelimit())
+                               printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
+                                      current->comm, task_pid_nr(current));
+@@ -1518,6 +1520,7 @@ do_prequeue:
+                               if (!urg_offset) {
+                                       if (!sock_flag(sk, SOCK_URGINLINE)) {
+                                               ++*seq;
++                                              urg_hole++;
+                                               offset++;
+                                               used--;
+                                               if (!used)
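
The adjusted check compares the peeking cursor with tp->copied_seq after subtracting
the work this call has done itself: the bytes it copied plus any urgent byte it
stepped over (the urg_hole). A short arithmetic illustration with made-up sequence
numbers, showing why both terms are needed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t copied_seq = 1000;	/* socket position when the call started */
	uint32_t peek_seq   = 1000;	/* local cursor used while peeking */
	uint32_t copied = 0, urg_hole = 0;

	peek_seq += 300; copied   += 300;	/* peeked 300 bytes of data */
	peek_seq += 1;   urg_hole += 1;		/* stepped over one urgent byte */

	if (peek_seq - copied - urg_hole != copied_seq)
		printf("another receiver raced us\n");
	else
		printf("consistent: no race\n");
	return 0;
}
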
diff --git a/queue-2.6.29/vlan-macvlan-fix-null-pointer-dereferences-in-ethtool-handlers.patch b/queue-2.6.29/vlan-macvlan-fix-null-pointer-dereferences-in-ethtool-handlers.patch
new file mode 100644 (file)
index 0000000..ca96451
--- /dev/null
@@ -0,0 +1,65 @@
+From dafbd702bc6cb655ce4969ec24a317b6ff5ac129 Mon Sep 17 00:00:00 2001
+From: Patrick McHardy <kaber@trash.net>
+Date: Fri, 17 Apr 2009 15:59:23 -0700
+Subject: vlan/macvlan: fix NULL pointer dereferences in ethtool handlers
+
+From: Patrick McHardy <kaber@trash.net>
+
+[ Upstream commit 7816a0a862d851d0b05710e7d94bfe390f3180e2 ]
+
+Check whether the underlying device provides a set of ethtool ops before
+checking for individual handlers to avoid NULL pointer dereferences.
+
+Reported-by: Art van Breemen <ard@telegraafnet.nl>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/macvlan.c |    9 ++++++---
+ net/8021q/vlan_dev.c  |    3 ++-
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -329,7 +329,8 @@ static u32 macvlan_ethtool_get_rx_csum(s
+       const struct macvlan_dev *vlan = netdev_priv(dev);
+       struct net_device *lowerdev = vlan->lowerdev;
+-      if (lowerdev->ethtool_ops->get_rx_csum == NULL)
++      if (lowerdev->ethtool_ops == NULL ||
++          lowerdev->ethtool_ops->get_rx_csum == NULL)
+               return 0;
+       return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
+ }
+@@ -340,7 +341,8 @@ static int macvlan_ethtool_get_settings(
+       const struct macvlan_dev *vlan = netdev_priv(dev);
+       struct net_device *lowerdev = vlan->lowerdev;
+-      if (!lowerdev->ethtool_ops->get_settings)
++      if (!lowerdev->ethtool_ops ||
++          !lowerdev->ethtool_ops->get_settings)
+               return -EOPNOTSUPP;
+       return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
+@@ -351,7 +353,8 @@ static u32 macvlan_ethtool_get_flags(str
+       const struct macvlan_dev *vlan = netdev_priv(dev);
+       struct net_device *lowerdev = vlan->lowerdev;
+-      if (!lowerdev->ethtool_ops->get_flags)
++      if (!lowerdev->ethtool_ops ||
++          !lowerdev->ethtool_ops->get_flags)
+               return 0;
+       return lowerdev->ethtool_ops->get_flags(lowerdev);
+ }
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -668,7 +668,8 @@ static int vlan_ethtool_get_settings(str
+       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct net_device *real_dev = vlan->real_dev;
+-      if (!real_dev->ethtool_ops->get_settings)
++      if (!real_dev->ethtool_ops ||
++          !real_dev->ethtool_ops->get_settings)
+               return -EOPNOTSUPP;
+       return real_dev->ethtool_ops->get_settings(real_dev, cmd);
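
The pattern being fixed is delegation through an optional ops table: the table
pointer and the individual callback can each legitimately be NULL, so both must be
checked before the call. A tiny standalone sketch with invented types (the real ones
are net_device and ethtool_ops):

#include <errno.h>
#include <stdio.h>

struct fake_ops {
	int (*get_settings)(void);
};

struct fake_dev {
	const struct fake_ops *ops;	/* may legitimately be NULL */
};

/* Delegate only if both the ops table and the specific callback exist. */
static int delegate_get_settings(const struct fake_dev *lower)
{
	if (!lower->ops || !lower->ops->get_settings)
		return -EOPNOTSUPP;
	return lower->ops->get_settings();
}

int main(void)
{
	struct fake_dev no_ops = { .ops = NULL };

	printf("no ops table: %d\n", delegate_get_settings(&no_ops));	/* no crash */
	return 0;
}
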
diff --git a/queue-2.6.29/xfrm-wrong-hash-value-for-temporary-sa.patch b/queue-2.6.29/xfrm-wrong-hash-value-for-temporary-sa.patch
new file mode 100644 (file)
index 0000000..4af284c
--- /dev/null
@@ -0,0 +1,44 @@
+From 5ad8585fef1612245f19245a418482adefef4601 Mon Sep 17 00:00:00 2001
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Mon, 27 Apr 2009 02:58:59 -0700
+Subject: xfrm: wrong hash value for temporary SA
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit 6a783c9067e3f71aac61a9262fe42c1f68efd4fc ]
+
+When the kernel inserts a temporary SA for IKE, it uses the wrong hash
+value for the dst list. Two hash values were calculated before: one with
+the source address and one with a wildcard source address.
+
+Bug hinted by Junwei Zhang <junwei.zhang@6wind.com>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/xfrm/xfrm_state.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, x
+ {
+       static xfrm_address_t saddr_wildcard = { };
+       struct net *net = xp_net(pol);
+-      unsigned int h;
++      unsigned int h, h_wildcard;
+       struct hlist_node *entry;
+       struct xfrm_state *x, *x0, *to_put;
+       int acquire_in_progress = 0;
+@@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, x
+       if (best)
+               goto found;
+-      h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+-      hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
++      h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
++      hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
+               if (x->props.family == family &&
+                   x->props.reqid == tmpl->reqid &&
+                   !(x->props.flags & XFRM_STATE_WILDRECV) &&