--- /dev/null
+From e1a3ddca02595af27c6057610f7b12f1b0ac21c6 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Tue, 21 Sep 2010 08:47:45 +0000
+Subject: ip: fix truesize mismatch in ip fragmentation
+
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit 3d13008e7345fa7a79d8f6438150dc15d6ba6e9d ]
+
+Special care should be taken when slow path is hit in ip_fragment() :
+
+When walking through frags, we transfer truesize ownership from skb to
+frags. Then if we hit a slow_path condition, we must undo this or risk
+uncharging frags->truesize twice, and in the end, having negative socket
+sk_wmem_alloc counter, or even freeing socket sooner than expected.
+
+Many thanks to Nick Bowler, who provided a very clean bug report and
+test program.
+
+Thanks to Jarek for reviewing my first patch and providing a V2
+
+While Nick's bisection pointed to commit 2b85a34e911 (net: No more
+expensive sock_hold()/sock_put() on each tx), underlying bug is older
+(2.6.12-rc5)
+
+A side effect is to extend work done in commit b2722b1c3a893e
+(ip_fragment: also adjust skb->truesize for packets not owned by a
+socket) to ipv6 as well.
+
+Reported-and-bisected-by: Nick Bowler <nbowler@elliptictech.com>
+Tested-by: Nick Bowler <nbowler@elliptictech.com>
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+CC: Jarek Poplawski <jarkao2@gmail.com>
+CC: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv4/ip_output.c | 19 +++++++++++++------
+ net/ipv6/ip6_output.c | 18 +++++++++++++-----
+ 2 files changed, 26 insertions(+), 11 deletions(-)
+
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -476,9 +476,8 @@ int ip_fragment(struct sk_buff *skb, int
+ * we can switch to copy when see the first bad fragment.
+ */
+ if (skb_has_frags(skb)) {
+- struct sk_buff *frag;
++ struct sk_buff *frag, *frag2;
+ int first_len = skb_pagelen(skb);
+- int truesizes = 0;
+
+ if (first_len - hlen > mtu ||
+ ((first_len - hlen) & 7) ||
+@@ -491,18 +490,18 @@ int ip_fragment(struct sk_buff *skb, int
+ if (frag->len > mtu ||
+ ((frag->len & 7) && frag->next) ||
+ skb_headroom(frag) < hlen)
+- goto slow_path;
++ goto slow_path_clean;
+
+ /* Partially cloned skb? */
+ if (skb_shared(frag))
+- goto slow_path;
++ goto slow_path_clean;
+
+ BUG_ON(frag->sk);
+ if (skb->sk) {
+ frag->sk = skb->sk;
+ frag->destructor = sock_wfree;
+ }
+- truesizes += frag->truesize;
++ skb->truesize -= frag->truesize;
+ }
+
+ /* Everything is OK. Generate! */
+@@ -512,7 +511,6 @@ int ip_fragment(struct sk_buff *skb, int
+ frag = skb_shinfo(skb)->frag_list;
+ skb_frag_list_init(skb);
+ skb->data_len = first_len - skb_headlen(skb);
+- skb->truesize -= truesizes;
+ skb->len = first_len;
+ iph->tot_len = htons(first_len);
+ iph->frag_off = htons(IP_MF);
+@@ -564,6 +562,15 @@ int ip_fragment(struct sk_buff *skb, int
+ }
+ IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+ return err;
++
++slow_path_clean:
++ skb_walk_frags(skb, frag2) {
++ if (frag2 == frag)
++ break;
++ frag2->sk = NULL;
++ frag2->destructor = NULL;
++ skb->truesize += frag2->truesize;
++ }
+ }
+
+ slow_path:
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -643,7 +643,7 @@ static int ip6_fragment(struct sk_buff *
+
+ if (skb_has_frags(skb)) {
+ int first_len = skb_pagelen(skb);
+- int truesizes = 0;
++ struct sk_buff *frag2;
+
+ if (first_len - hlen > mtu ||
+ ((first_len - hlen) & 7) ||
+@@ -655,18 +655,18 @@ static int ip6_fragment(struct sk_buff *
+ if (frag->len > mtu ||
+ ((frag->len & 7) && frag->next) ||
+ skb_headroom(frag) < hlen)
+- goto slow_path;
++ goto slow_path_clean;
+
+ /* Partially cloned skb? */
+ if (skb_shared(frag))
+- goto slow_path;
++ goto slow_path_clean;
+
+ BUG_ON(frag->sk);
+ if (skb->sk) {
+ frag->sk = skb->sk;
+ frag->destructor = sock_wfree;
+- truesizes += frag->truesize;
+ }
++ skb->truesize -= frag->truesize;
+ }
+
+ err = 0;
+@@ -697,7 +697,6 @@ static int ip6_fragment(struct sk_buff *
+
+ first_len = skb_pagelen(skb);
+ skb->data_len = first_len - skb_headlen(skb);
+- skb->truesize -= truesizes;
+ skb->len = first_len;
+ ipv6_hdr(skb)->payload_len = htons(first_len -
+ sizeof(struct ipv6hdr));
+@@ -760,6 +759,15 @@ static int ip6_fragment(struct sk_buff *
+ IPSTATS_MIB_FRAGFAILS);
+ dst_release(&rt->u.dst);
+ return err;
++
++slow_path_clean:
++ skb_walk_frags(skb, frag2) {
++ if (frag2 == frag)
++ break;
++ frag2->sk = NULL;
++ frag2->destructor = NULL;
++ skb->truesize += frag2->truesize;
++ }
+ }
+
+ slow_path:
--- /dev/null
+From 9e244c6ac5641f48b55044d9fc3995f32a2f8208 Mon Sep 17 00:00:00 2001
+From: Jianzhao Wang <jianzhao.wang@6wind.com>
+Date: Wed, 8 Sep 2010 14:35:43 -0700
+Subject: net: blackhole route should always be recalculated
+
+
+From: Jianzhao Wang <jianzhao.wang@6wind.com>
+
+[ Upstream commit ae2688d59b5f861dc70a091d003773975d2ae7fb ]
+
+Blackhole routes are used when xfrm_lookup() returns -EREMOTE (error
+triggered by IKE for example), hence this kind of route is always
+temporary and so we should check if a better route exists for next
+packets.
+Bug has been introduced by commit d11a4dc18bf41719c9f0d7ed494d295dd2973b92.
+
+Signed-off-by: Jianzhao Wang <jianzhao.wang@6wind.com>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv4/route.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2712,6 +2712,11 @@ slow_output:
+
+ EXPORT_SYMBOL_GPL(__ip_route_output_key);
+
++static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
++{
++ return NULL;
++}
++
+ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
+ {
+ }
+@@ -2720,7 +2725,7 @@ static struct dst_ops ipv4_dst_blackhole
+ .family = AF_INET,
+ .protocol = cpu_to_be16(ETH_P_IP),
+ .destroy = ipv4_dst_destroy,
+- .check = ipv4_dst_check,
++ .check = ipv4_blackhole_dst_check,
+ .update_pmtu = ipv4_rt_blackhole_update_pmtu,
+ .entries = ATOMIC_INIT(0),
+ };
--- /dev/null
+From cc0ae35298fdf854a48fbf16be07177156a282a7 Mon Sep 17 00:00:00 2001
+From: Kees Cook <kees.cook@canonical.com>
+Date: Mon, 11 Oct 2010 12:23:25 -0700
+Subject: net: clear heap allocations for privileged ethtool actions
+
+
+From: Kees Cook <kees.cook@canonical.com>
+
+[ Upstream commit b00916b189d13a615ff05c9242201135992fcda3 ]
+
+Several other ethtool functions leave heap uncleared (potentially) by
+drivers. Some interfaces appear safe (eeprom, etc), in that the sizes
+are well controlled. In some situations (e.g. unchecked error conditions),
+the heap will remain unchanged in areas before copying back to userspace.
+Note that these are less of an issue since these all require CAP_NET_ADMIN.
+
+Cc: stable@kernel.org
+Signed-off-by: Kees Cook <kees.cook@canonical.com>
+Acked-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/core/ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -311,7 +311,7 @@ static int ethtool_get_regs(struct net_d
+ if (regs.len > reglen)
+ regs.len = reglen;
+
+- regbuf = kmalloc(reglen, GFP_USER);
++ regbuf = kzalloc(reglen, GFP_USER);
+ if (!regbuf)
+ return -ENOMEM;
+
--- /dev/null
+From c5770f1ec3ce0ea87d6c0193d7f9c1331c278996 Mon Sep 17 00:00:00 2001
+From: Maciej Żenczykowski <maze@google.com>
+Date: Sun, 3 Oct 2010 14:49:00 -0700
+Subject: net: Fix IPv6 PMTU disc. w/ asymmetric routes
+
+
+From: Maciej Żenczykowski <maze@google.com>
+
+[ Upstream commit ae878ae280bea286ff2b1e1cb6e609dd8cb4501d ]
+
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv6/route.c | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1561,14 +1561,13 @@ out:
+ * i.e. Path MTU discovery
+ */
+
+-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+- struct net_device *dev, u32 pmtu)
++static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
++ struct net *net, u32 pmtu, int ifindex)
+ {
+ struct rt6_info *rt, *nrt;
+- struct net *net = dev_net(dev);
+ int allfrag = 0;
+
+- rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
++ rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
+ if (rt == NULL)
+ return;
+
+@@ -1636,6 +1635,27 @@ out:
+ dst_release(&rt->u.dst);
+ }
+
++void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
++ struct net_device *dev, u32 pmtu)
++{
++ struct net *net = dev_net(dev);
++
++ /*
++ * RFC 1981 states that a node "MUST reduce the size of the packets it
++ * is sending along the path" that caused the Packet Too Big message.
++ * Since it's not possible in the general case to determine which
++ * interface was used to send the original packet, we update the MTU
++ * on the interface that will be used to send future packets. We also
++ * update the MTU on the interface that received the Packet Too Big in
++ * case the original packet was forced out that interface with
++ * SO_BINDTODEVICE or similar. This is the next best thing to the
++ * correct behaviour, which would be to update the MTU on all
++ * interfaces.
++ */
++ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
++ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
++}
++
+ /*
+ * Misc support functions
+ */
--- /dev/null
+From 0c5647cd1cc02c549cfe0ea602e1c498519670ef Mon Sep 17 00:00:00 2001
+From: Nagendra Tomar <tomer_iisc@yahoo.com>
+Date: Sat, 2 Oct 2010 23:45:06 +0000
+Subject: net: Fix the condition passed to sk_wait_event()
+
+
+From: Nagendra Tomar <tomer_iisc@yahoo.com>
+
+[ Upstream commit 482964e56e1320cb7952faa1932d8ecf59c4bf75 ]
+
+This patch fixes the condition (3rd arg) passed to sk_wait_event() in
+sk_stream_wait_memory(). The incorrect check in sk_stream_wait_memory()
+causes the following soft lockup in tcp_sendmsg() when the global tcp
+memory pool has exhausted.
+
+>>> snip <<<
+
+localhost kernel: BUG: soft lockup - CPU#3 stuck for 11s! [sshd:6429]
+localhost kernel: CPU 3:
+localhost kernel: RIP: 0010:[sk_stream_wait_memory+0xcd/0x200] [sk_stream_wait_memory+0xcd/0x200] sk_stream_wait_memory+0xcd/0x200
+localhost kernel:
+localhost kernel: Call Trace:
+localhost kernel: [sk_stream_wait_memory+0x1b1/0x200] sk_stream_wait_memory+0x1b1/0x200
+localhost kernel: [<ffffffff802557c0>] autoremove_wake_function+0x0/0x40
+localhost kernel: [ipv6:tcp_sendmsg+0x6e6/0xe90] tcp_sendmsg+0x6e6/0xce0
+localhost kernel: [sock_aio_write+0x126/0x140] sock_aio_write+0x126/0x140
+localhost kernel: [xfs:do_sync_write+0xf1/0x130] do_sync_write+0xf1/0x130
+localhost kernel: [<ffffffff802557c0>] autoremove_wake_function+0x0/0x40
+localhost kernel: [hrtimer_start+0xe3/0x170] hrtimer_start+0xe3/0x170
+localhost kernel: [vfs_write+0x185/0x190] vfs_write+0x185/0x190
+localhost kernel: [sys_write+0x50/0x90] sys_write+0x50/0x90
+localhost kernel: [system_call+0x7e/0x83] system_call+0x7e/0x83
+
+>>> snip <<<
+
+What is happening is, that the sk_wait_event() condition passed from
+sk_stream_wait_memory() evaluates to true for the case of tcp global memory
+exhaustion. This is because both sk_stream_memory_free() and vm_wait are true
+which causes sk_wait_event() to *not* call schedule_timeout().
+Hence sk_stream_wait_memory() returns immediately to the caller w/o sleeping.
+This causes the caller to again try allocation, which again fails and again
+calls sk_stream_wait_memory(), and so on.
+
+[ Bug introduced by commit c1cbe4b7ad0bc4b1d98ea708a3fecb7362aa4088
+ ("[NET]: Avoid atomic xchg() for non-error case") -DaveM ]
+
+Signed-off-by: Nagendra Singh Tomar <tomer_iisc@yahoo.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/core/stream.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -140,10 +140,10 @@ int sk_stream_wait_memory(struct sock *s
+
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ sk->sk_write_pending++;
+- sk_wait_event(sk, ¤t_timeo, !sk->sk_err &&
+- !(sk->sk_shutdown & SEND_SHUTDOWN) &&
+- sk_stream_memory_free(sk) &&
+- vm_wait);
++ sk_wait_event(sk, ¤t_timeo, sk->sk_err ||
++ (sk->sk_shutdown & SEND_SHUTDOWN) ||
++ (sk_stream_memory_free(sk) &&
++ !vm_wait));
+ sk->sk_write_pending--;
+
+ if (vm_wait) {
--- /dev/null
+From a15b685b82ecb42ca10fae48a5a07aebf348c480 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Tue, 21 Sep 2010 13:04:04 -0700
+Subject: netxen: dont set skb->truesize
+
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit 7e96dc7045bff8758804b047c0dfb6868f182500 ]
+
+skb->truesize is set in core network.
+
+Don't change it unless dealing with fragments.
+
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/net/netxen/netxen_nic_init.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/net/netxen/netxen_nic_init.c
++++ b/drivers/net/netxen/netxen_nic_init.c
+@@ -1199,7 +1199,6 @@ netxen_process_rcv(struct netxen_adapter
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+- skb->truesize = skb->len + sizeof(struct sk_buff);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+@@ -1261,8 +1260,6 @@ netxen_process_lro(struct netxen_adapter
+
+ skb_put(skb, lro_length + data_offset);
+
+- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
+-
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
--- /dev/null
+From fb179ba0a33882bf9f639202e7a90f7c77d05a00 Mon Sep 17 00:00:00 2001
+From: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+Date: Mon, 27 Sep 2010 23:10:42 +0000
+Subject: Phonet: Correct header retrieval after pskb_may_pull
+
+
+From: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+
+[ Upstream commit a91e7d471e2e384035b9746ea707ccdcd353f5dd ]
+
+Retrieve the header after doing pskb_may_pull, since pskb_may_pull
+could change the buffer structure.
+
+This is based on the comment given by Eric Dumazet on Phonet
+Pipe controller patch for a similar problem.
+
+Signed-off-by: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+Acked-by: Linus Walleij <linus.walleij@stericsson.com>
+Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
+Acked-by: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/phonet/pep.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -224,12 +224,13 @@ static void pipe_grant_credits(struct so
+ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
+ {
+ struct pep_sock *pn = pep_sk(sk);
+- struct pnpipehdr *hdr = pnp_hdr(skb);
++ struct pnpipehdr *hdr;
+ int wake = 0;
+
+ if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
+ return -EINVAL;
+
++ hdr = pnp_hdr(skb);
+ if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
+ LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
+ (unsigned)hdr->data[0]);
--- /dev/null
+From aeb19f6052b5e5c8a24aa444fbff73b84341beac Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Fri, 8 Oct 2010 04:25:00 +0000
+Subject: r8169: allocate with GFP_KERNEL flag when able to sleep
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit aeb19f6052b5e5c8a24aa444fbff73b84341beac upstream.
+
+We have fedora bug report where driver fail to initialize after
+suspend/resume because of memory allocation errors:
+https://bugzilla.redhat.com/show_bug.cgi?id=629158
+
+To fix use GFP_KERNEL allocation where possible.
+
+Tested-by: Neal Becker <ndbecker2@gmail.com>
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/r8169.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -3999,7 +3999,7 @@ static inline void rtl8169_map_to_asic(s
+ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
+ struct net_device *dev,
+ struct RxDesc *desc, int rx_buf_sz,
+- unsigned int align)
++ unsigned int align, gfp_t gfp)
+ {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+@@ -4007,7 +4007,7 @@ static struct sk_buff *rtl8169_alloc_rx_
+
+ pad = align ? align : NET_IP_ALIGN;
+
+- skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
++ skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
+ if (!skb)
+ goto err_out;
+
+@@ -4038,7 +4038,7 @@ static void rtl8169_rx_clear(struct rtl8
+ }
+
+ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
+- u32 start, u32 end)
++ u32 start, u32 end, gfp_t gfp)
+ {
+ u32 cur;
+
+@@ -4053,7 +4053,7 @@ static u32 rtl8169_rx_fill(struct rtl816
+
+ skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
+ tp->RxDescArray + i,
+- tp->rx_buf_sz, tp->align);
++ tp->rx_buf_sz, tp->align, gfp);
+ if (!skb)
+ break;
+
+@@ -4081,7 +4081,7 @@ static int rtl8169_init_ring(struct net_
+ memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
+ memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
+
+- if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
++ if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
+ goto err_out;
+
+ rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+@@ -4584,7 +4584,7 @@ static int rtl8169_rx_interrupt(struct n
+ count = cur_rx - tp->cur_rx;
+ tp->cur_rx = cur_rx;
+
+- delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
++ delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
+ if (!delta && count && netif_msg_intr(tp))
+ printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+ tp->dirty_rx += delta;
--- /dev/null
+From 82150dc22741d65244fd2b1694e5296a8df22741 Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Mon, 20 Sep 2010 15:40:35 -0700
+Subject: rose: Fix signedness issues wrt. digi count.
+
+
+From: David S. Miller <davem@davemloft.net>
+
+[ Upstream commit 9828e6e6e3f19efcb476c567b9999891d051f52f ]
+
+Just use explicit casts, since we really can't change the
+types of structures exported to userspace which have been
+around for 15 years or so.
+
+Reported-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/rose/af_rose.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -677,7 +677,7 @@ static int rose_bind(struct socket *sock
+ if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
+ return -EINVAL;
+
+- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
++ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
+ return -EINVAL;
+
+ if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
+@@ -737,7 +737,7 @@ static int rose_connect(struct socket *s
+ if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
+ return -EINVAL;
+
+- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
++ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
+ return -EINVAL;
+
+ /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
x86-amd-mce-thresholding-fix-the-mci_miscj-iteration-order.patch
de-pessimize-rds_page_copy_user.patch
drm-radeon-fix-pci-id-5657-to-be-an-rv410.patch
+xfrm4-strip-ecn-and-ip-precedence-bits-in-policy-lookup.patch
+tcp-fix-4gb-writes-on-64-bit.patch
+net-fix-the-condition-passed-to-sk_wait_event.patch
+phonet-correct-header-retrieval-after-pskb_may_pull.patch
+net-fix-ipv6-pmtu-disc.-w-asymmetric-routes.patch
+ip-fix-truesize-mismatch-in-ip-fragmentation.patch
+net-clear-heap-allocations-for-privileged-ethtool-actions.patch
+tcp-fix-race-in-tcp_poll.patch
+netxen-dont-set-skb-truesize.patch
+rose-fix-signedness-issues-wrt.-digi-count.patch
+net-blackhole-route-should-always-be-recalculated.patch
+skge-add-quirk-to-limit-dma.patch
+r8169-allocate-with-gfp_kernel-flag-when-able-to-sleep.patch
--- /dev/null
+From 392bd0cb000d4aac9e88e4f50823db85e7220688 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Tue, 5 Oct 2010 15:11:40 -0700
+Subject: skge: add quirk to limit DMA
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit 392bd0cb000d4aac9e88e4f50823db85e7220688 upstream.
+
+Skge devices installed on some Gigabyte motherboards are not able to
+perform 64-bit DMA correctly due to the board PCI implementation, so limit
+DMA to 32bit if such boards are detected.
+
+Bug was reported here:
+https://bugzilla.redhat.com/show_bug.cgi?id=447489
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Tested-by: Luya Tshimbalanga <luya@fedoraproject.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/skge.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/skge.c
++++ b/drivers/net/skge.c
+@@ -40,6 +40,7 @@
+ #include <linux/sched.h>
+ #include <linux/seq_file.h>
+ #include <linux/mii.h>
++#include <linux/dmi.h>
+ #include <asm/irq.h>
+
+ #include "skge.h"
+@@ -3890,6 +3891,8 @@ static void __devinit skge_show_addr(str
+ dev->name, dev->dev_addr);
+ }
+
++static int only_32bit_dma;
++
+ static int __devinit skge_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
+@@ -3911,7 +3914,7 @@ static int __devinit skge_probe(struct p
+
+ pci_set_master(pdev);
+
+- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
++ if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+@@ -4168,8 +4171,21 @@ static struct pci_driver skge_driver = {
+ .shutdown = skge_shutdown,
+ };
+
++static struct dmi_system_id skge_32bit_dma_boards[] = {
++ {
++ .ident = "Gigabyte nForce boards",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
++ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
++ },
++ },
++ {}
++};
++
+ static int __init skge_init_module(void)
+ {
++ if (dmi_check_system(skge_32bit_dma_boards))
++ only_32bit_dma = 1;
+ skge_debug_init();
+ return pci_register_driver(&skge_driver);
+ }
--- /dev/null
+From 9d7439227df4cc64a8c023a32f2731081a0a49a9 Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Mon, 27 Sep 2010 20:24:54 -0700
+Subject: tcp: Fix >4GB writes on 64-bit.
+
+
+From: David S. Miller <davem@davemloft.net>
+
+[ Upstream commit 01db403cf99f739f86903314a489fb420e0e254f ]
+
+Fixes kernel bugzilla #16603
+
+tcp_sendmsg() truncates iov_len to an 'int', which causes a 4GB write to
+write zero bytes, for example.
+
+There is also the problem higher up of how verify_iovec() works. It
+wants to prevent the total length from looking like an error return
+value.
+
+However it does this using 'int', but syscalls return 'long' (and
+thus signed 64-bit on 64-bit machines). So it could trigger
+false-positives on 64-bit as written. So fix it to use 'long'.
+
+Reported-by: Olaf Bonorden <bono@onlinehome.de>
+Reported-by: Daniel Büse <dbuese@gmx.de>
+Reported-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ include/linux/socket.h | 2 +-
+ net/core/iovec.c | 5 +++--
+ net/ipv4/tcp.c | 2 +-
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -304,7 +304,7 @@ extern int csum_partial_copy_fromiovecen
+ int offset,
+ unsigned int len, __wsum *csump);
+
+-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
++extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+ extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
+ extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+ int offset, int len);
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -36,9 +36,10 @@
+ * in any case.
+ */
+
+-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
++long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+ {
+- int size, err, ct;
++ int size, ct;
++ long err;
+
+ if (m->msg_namelen) {
+ if (mode == VERIFY_READ) {
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -935,7 +935,7 @@ int tcp_sendmsg(struct kiocb *iocb, stru
+ goto out_err;
+
+ while (--iovlen >= 0) {
+- int seglen = iov->iov_len;
++ size_t seglen = iov->iov_len;
+ unsigned char __user *from = iov->iov_base;
+
+ iov++;
--- /dev/null
+From eb50686502e040058eb7f12f824f45ef04edc380 Mon Sep 17 00:00:00 2001
+From: Tom Marshall <tdm.code@gmail.com>
+Date: Mon, 20 Sep 2010 15:42:05 -0700
+Subject: tcp: Fix race in tcp_poll
+
+
+From: Tom Marshall <tdm.code@gmail.com>
+
+[ Upstream commit a4d258036ed9b2a1811c3670c6099203a0f284a0 ]
+
+If a RST comes in immediately after checking sk->sk_err, tcp_poll will
+return POLLIN but not POLLOUT. Fix this by checking sk->sk_err at the end
+of tcp_poll. Additionally, ensure the correct order of operations on SMP
+machines with memory barriers.
+
+Signed-off-by: Tom Marshall <tdm.code@gmail.com>
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv4/tcp.c | 7 +++++--
+ net/ipv4/tcp_input.c | 2 ++
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file,
+ */
+
+ mask = 0;
+- if (sk->sk_err)
+- mask = POLLERR;
+
+ /*
+ * POLLHUP is certainly not done right. But poll() doesn't
+@@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file,
+ if (tp->urg_data & TCP_URG_VALID)
+ mask |= POLLPRI;
+ }
++ /* This barrier is coupled with smp_wmb() in tcp_reset() */
++ smp_rmb();
++ if (sk->sk_err)
++ mask |= POLLERR;
++
+ return mask;
+ }
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3969,6 +3969,8 @@ static void tcp_reset(struct sock *sk)
+ default:
+ sk->sk_err = ECONNRESET;
+ }
++ /* This barrier is coupled with smp_rmb() in tcp_poll() */
++ smp_wmb();
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
--- /dev/null
+From 25963c2c4f9a38c02df9ecad6b86df328f651ba2 Mon Sep 17 00:00:00 2001
+From: Ulrich Weber <uweber@astaro.com>
+Date: Tue, 5 Oct 2010 13:46:19 +0200
+Subject: xfrm4: strip ECN and IP Precedence bits in policy lookup
+
+
+From: Ulrich Weber <uweber@astaro.com>
+
+[ Upstream commit 94e2238969e89f5112297ad2a00103089dde7e8f ]
+
+don't compare ECN and IP Precedence bits in find_bundle,
+and use an ECN-bit-stripped TOS value in xfrm_lookup
+
+Signed-off-by: Ulrich Weber <uweber@astaro.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/ipv4/xfrm4_policy.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -71,7 +71,7 @@ __xfrm4_find_bundle(struct flowi *fl, st
+ if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
+ xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
+ xdst->u.rt.fl.fl4_src == fl->fl4_src &&
+- xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
++ !((xdst->u.rt.fl.fl4_tos ^ fl->fl4_tos) & IPTOS_RT_MASK) &&
+ xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
+ dst_clone(dst);
+ break;
+@@ -83,7 +83,7 @@ __xfrm4_find_bundle(struct flowi *fl, st
+
+ static int xfrm4_get_tos(struct flowi *fl)
+ {
+- return fl->fl4_tos;
++ return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
+ }
+
+ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,