--- /dev/null
+From foo@baz Sat Jan 17 18:13:47 PST 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 11 Jan 2015 10:32:18 -0800
+Subject: alx: fix alx_poll()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 7a05dc64e2e4c611d89007b125b20c0d2a4d31a5 ]
+
+Commit d75b1ade567f ("net: less interrupt masking in NAPI") uncovered
+wrong alx_poll() behavior.
+
+A NAPI poll() handler is supposed to return exactly the budget when/if
+napi_complete() has not been called.
+
+It is also supposed to return the number of frames that were received,
+so that netdev_budget can have a meaning.
+
+Also, in case of TX pressure, we still have to dequeue received
+packets: alx_clean_rx_irq() has to be called even if
+alx_clean_tx_irq(alx) returns false, otherwise the device effectively
+becomes half duplex.
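+
+For reference, a minimal sketch (generic code, not this driver's) of the
+return-value contract a NAPI poll handler is expected to honor; clean_rx()
+stands in for the device-specific receive cleanup:
+
+  static int example_poll(struct napi_struct *napi, int budget)
+  {
+          int work = clean_rx(napi, budget);  /* frames actually received  */
+
+          if (work == budget)                 /* budget exhausted: return  */
+                  return budget;              /* budget, do not complete   */
+
+          napi_complete(napi);                /* done: allow irqs again    */
+          return work;                        /* strictly less than budget */
+  }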
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: d75b1ade567f ("net: less interrupt masking in NAPI")
+Reported-by: Oded Gabbay <oded.gabbay@amd.com>
+Bisected-by: Oded Gabbay <oded.gabbay@amd.com>
+Tested-by: Oded Gabbay <oded.gabbay@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/atheros/alx/main.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct al
+ schedule_work(&alx->reset_wk);
+ }
+
+-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
++static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
+ {
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_rrd *rrd;
+ struct alx_buffer *rxb;
+ struct sk_buff *skb;
+ u16 length, rfd_cleaned = 0;
++ int work = 0;
+
+- while (budget > 0) {
++ while (work < budget) {
+ rrd = &rxq->rrd[rxq->rrd_read_idx];
+ if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+ break;
+@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_
+ ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ RRD_NOR) != 1) {
+ alx_schedule_reset(alx);
+- return 0;
++ return work;
+ }
+
+ rxb = &rxq->bufs[rxq->read_idx];
+@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_
+ }
+
+ napi_gro_receive(&alx->napi, skb);
+- budget--;
++ work++;
+
+ next_pkt:
+ if (++rxq->read_idx == alx->rx_ringsz)
+@@ -258,21 +259,22 @@ next_pkt:
+ if (rfd_cleaned)
+ alx_refill_rx_ring(alx, GFP_ATOMIC);
+
+- return budget > 0;
++ return work;
+ }
+
+ static int alx_poll(struct napi_struct *napi, int budget)
+ {
+ struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+ struct alx_hw *hw = &alx->hw;
+- bool complete = true;
+ unsigned long flags;
++ bool tx_complete;
++ int work;
+
+- complete = alx_clean_tx_irq(alx) &&
+- alx_clean_rx_irq(alx, budget);
++ tx_complete = alx_clean_tx_irq(alx);
++ work = alx_clean_rx_irq(alx, budget);
+
+- if (!complete)
+- return 1;
++ if (!tx_complete || work == budget)
++ return budget;
+
+ napi_complete(&alx->napi);
+
+@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *
+
+ alx_post_write(hw);
+
+- return 0;
++ return work;
+ }
+
+ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
--- /dev/null
+From foo@baz Sat Jan 17 18:13:47 PST 2015
+From: Govindarajulu Varadarajan <_govind@gmx.com>
+Date: Thu, 18 Dec 2014 15:58:42 +0530
+Subject: enic: fix rx skb checksum
+
+From: Govindarajulu Varadarajan <_govind@gmx.com>
+
+[ Upstream commit 17e96834fd35997ca7cdfbf15413bcd5a36ad448 ]
+
+The hardware always provides the complement of the IP pseudo checksum.
+The stack expects the checksum of the whole packet, without the pseudo
+checksum, if CHECKSUM_COMPLETE is set.
+
+This causes checksum errors in nf & ovs.
+
+kernel: qg-19546f09-f2: hw csum failure
+kernel: CPU: 9 PID: 0 Comm: swapper/9 Tainted: GF O-------------- 3.10.0-123.8.1.el7.x86_64 #1
+kernel: Hardware name: Cisco Systems Inc UCSB-B200-M3/UCSB-B200-M3, BIOS B200M3.2.2.3.0.080820141339 08/08/2014
+kernel: ffff881218f40000 df68243feb35e3a8 ffff881237a43ab8 ffffffff815e237b
+kernel: ffff881237a43ad0 ffffffff814cd4ca ffff8829ec71eb00 ffff881237a43af0
+kernel: ffffffff814c6232 0000000000000286 ffff8829ec71eb00 ffff881237a43b00
+kernel: Call Trace:
+kernel: <IRQ> [<ffffffff815e237b>] dump_stack+0x19/0x1b
+kernel: [<ffffffff814cd4ca>] netdev_rx_csum_fault+0x3a/0x40
+kernel: [<ffffffff814c6232>] __skb_checksum_complete_head+0x62/0x70
+kernel: [<ffffffff814c6251>] __skb_checksum_complete+0x11/0x20
+kernel: [<ffffffff8155a20c>] nf_ip_checksum+0xcc/0x100
+kernel: [<ffffffffa049edc7>] icmp_error+0x1f7/0x35c [nf_conntrack_ipv4]
+kernel: [<ffffffff814cf419>] ? netif_rx+0xb9/0x1d0
+kernel: [<ffffffffa040eb7b>] ? internal_dev_recv+0xdb/0x130 [openvswitch]
+kernel: [<ffffffffa04c8330>] nf_conntrack_in+0xf0/0xa80 [nf_conntrack]
+kernel: [<ffffffff81509380>] ? inet_del_offload+0x40/0x40
+kernel: [<ffffffffa049e302>] ipv4_conntrack_in+0x22/0x30 [nf_conntrack_ipv4]
+kernel: [<ffffffff815005ca>] nf_iterate+0xaa/0xc0
+kernel: [<ffffffff81509380>] ? inet_del_offload+0x40/0x40
+kernel: [<ffffffff81500664>] nf_hook_slow+0x84/0x140
+kernel: [<ffffffff81509380>] ? inet_del_offload+0x40/0x40
+kernel: [<ffffffff81509dd4>] ip_rcv+0x344/0x380
+
+The hardware verifies the IP and TCP/UDP header checksums but does not
+provide a payload checksum, so use CHECKSUM_UNNECESSARY. Set it only if
+the packet is a valid IP TCP/UDP packet.
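+
+As a generic illustration of the two modes (a sketch, not the enic code;
+full_packet_csum is a hypothetical value a NIC would have to supply):
+
+  /* CHECKSUM_COMPLETE: the driver must hand the stack the checksum of
+   * the entire packet, not a pseudo-header complement.
+   */
+  skb->csum = full_packet_csum;
+  skb->ip_summed = CHECKSUM_COMPLETE;
+
+  /* CHECKSUM_UNNECESSARY: no checksum value is passed up at all; the
+   * driver only asserts that the hardware already validated it.
+   */
+  if (tcp_udp_csum_ok && ipv4_csum_ok)
+          skb->ip_summed = CHECKSUM_UNNECESSARY;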
+
+Cc: Jiri Benc <jbenc@redhat.com>
+Cc: Stefan Assmann <sassmann@redhat.com>
+Reported-by: Sunil Choudhary <schoudha@redhat.com>
+Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
+Reviewed-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cisco/enic/enic_main.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1294,10 +1294,14 @@ static void enic_rq_indicate_buf(struct
+ skb_put(skb, bytes_written);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
+- skb->csum = htons(checksum);
+- skb->ip_summed = CHECKSUM_COMPLETE;
+- }
++	/* Hardware does not provide the whole packet checksum. It only
++	 * provides the pseudo checksum. Since hw validates the packet
++	 * checksum but does not give us the checksum value, use
++	 * CHECKSUM_UNNECESSARY.
++	 */
++ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
++ ipv4_csum_ok)
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (vlan_stripped)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
--- /dev/null
+From foo@baz Sat Jan 17 18:13:47 PST 2015
+From: David Miller <davem@davemloft.net>
+Date: Tue, 16 Dec 2014 17:58:17 -0500
+Subject: netlink: Always copy on mmap TX.
+
+From: David Miller <davem@davemloft.net>
+
+[ Upstream commit 4682a0358639b29cf69437ed909c6221f8c89847 ]
+
+Checking the file f_count and the nlk->mapped count is not completely
+sufficient to prevent the mmap'd area contents from changing from
+under us during netlink mmap sendmsg() operations.
+
+Be careful to sample the header's length field only once, because this
+could change from under us as well.
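+
+The resulting pattern (a sketch of what the patch below does) for any
+header field userspace can rewrite through the shared mapping: sample it
+once and use only the local copy afterwards.
+
+  unsigned int nm_len = ACCESS_ONCE(hdr->nm_len);  /* single sample     */
+
+  if (nm_len > maxlen)                             /* validate the copy */
+          return -EINVAL;
+  memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);  /* use it   */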
+
+Fixes: 5fd96123ee19 ("netlink: implement memory mapped sendmsg()")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Daniel Borkmann <dborkman@redhat.com>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c | 52 ++++++++++++++---------------------------------
+ 1 file changed, 16 insertions(+), 36 deletions(-)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -374,14 +374,14 @@ out:
+ return err;
+ }
+
+-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
++static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
+ {
+ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+ struct page *p_start, *p_end;
+
+ /* First page is flushed through netlink_{get,set}_status */
+ p_start = pgvec_to_page(hdr + PAGE_SIZE);
+- p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
++ p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
+ while (p_start <= p_end) {
+ flush_dcache_page(p_start);
+ p_start++;
+@@ -563,24 +563,16 @@ static int netlink_mmap_sendmsg(struct s
+ struct nl_mmap_hdr *hdr;
+ struct sk_buff *skb;
+ unsigned int maxlen;
+- bool excl = true;
+ int err = 0, len = 0;
+
+- /* Netlink messages are validated by the receiver before processing.
+- * In order to avoid userspace changing the contents of the message
+- * after validation, the socket and the ring may only be used by a
+- * single process, otherwise we fall back to copying.
+- */
+- if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
+- atomic_read(&nlk->mapped) > 1)
+- excl = false;
+-
+ mutex_lock(&nlk->pg_vec_lock);
+
+ ring = &nlk->tx_ring;
+ maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+
+ do {
++ unsigned int nm_len;
++
+ hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
+ if (hdr == NULL) {
+ if (!(msg->msg_flags & MSG_DONTWAIT) &&
+@@ -588,35 +580,23 @@ static int netlink_mmap_sendmsg(struct s
+ schedule();
+ continue;
+ }
+- if (hdr->nm_len > maxlen) {
++
++ nm_len = ACCESS_ONCE(hdr->nm_len);
++ if (nm_len > maxlen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+- netlink_frame_flush_dcache(hdr);
++ netlink_frame_flush_dcache(hdr, nm_len);
+
+- if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
+- skb = alloc_skb_head(GFP_KERNEL);
+- if (skb == NULL) {
+- err = -ENOBUFS;
+- goto out;
+- }
+- sock_hold(sk);
+- netlink_ring_setup_skb(skb, sk, ring, hdr);
+- NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
+- __skb_put(skb, hdr->nm_len);
+- netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
+- atomic_inc(&ring->pending);
+- } else {
+- skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
+- if (skb == NULL) {
+- err = -ENOBUFS;
+- goto out;
+- }
+- __skb_put(skb, hdr->nm_len);
+- memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
+- netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
++ skb = alloc_skb(nm_len, GFP_KERNEL);
++ if (skb == NULL) {
++ err = -ENOBUFS;
++ goto out;
+ }
++ __skb_put(skb, nm_len);
++ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
++ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+
+ netlink_increment_head(ring);
+
+@@ -662,7 +642,7 @@ static void netlink_queue_mmaped_skb(str
+ hdr->nm_pid = NETLINK_CB(skb).creds.pid;
+ hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+ hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+- netlink_frame_flush_dcache(hdr);
++ netlink_frame_flush_dcache(hdr, hdr->nm_len);
+ netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+
+ NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
--- /dev/null
+From foo@baz Sat Jan 17 18:13:47 PST 2015
+From: Thomas Graf <tgraf@suug.ch>
+Date: Thu, 18 Dec 2014 10:30:26 +0000
+Subject: netlink: Don't reorder loads/stores before marking mmap netlink frame as available
+
+From: Thomas Graf <tgraf@suug.ch>
+
+[ Upstream commit a18e6a186f53af06937a2c268c72443336f4ab56 ]
+
+Each mmaped Netlink frame contains a status field which indicates
+whether the frame is unused, reserved, contains data or needs to be
+skipped. Neither loads nor stores may be reordered, and both must
+complete, before the status field is changed and another CPU might
+pick up the frame for use. Use an smp_mb() to cover the needs of both
+types of callers of netlink_set_status(): callers which have been
+reading data from the frame, and callers which have been filling or
+releasing the frame and thus writing to it.
+
+- Example code path requiring a smp_rmb():
+ memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
+ netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+
+- Example code path requiring a smp_wmb():
+ hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+ hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+ netlink_frame_flush_dcache(hdr);
+ netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
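+
+- Generic sketch of the ordering this provides (placeholder names, not
+  the netlink code itself):
+
+  frame->payload = val;   /* earlier loads/stores on the frame ...      */
+  smp_mb();               /* ... may not be reordered past this barrier */
+  frame->status = READY;  /* only now may another CPU reuse the frame   */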
+
+Fixes: f9c228 ("netlink: implement memory mapped recvmsg()")
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -399,9 +399,9 @@ static enum nl_mmap_status netlink_get_s
+ static void netlink_set_status(struct nl_mmap_hdr *hdr,
+ enum nl_mmap_status status)
+ {
++ smp_mb();
+ hdr->nm_status = status;
+ flush_dcache_page(pgvec_to_page(hdr));
+- smp_wmb();
+ }
+
+ static struct nl_mmap_hdr *
--- /dev/null
+netlink-always-copy-on-mmap-tx.patch
+netlink-don-t-reorder-loads-stores-before-marking-mmap-netlink-frame-as-available.patch
+tg3-tg3_disable_ints-using-uninitialized-mailbox-value-to-disable-interrupts.patch
+tcp-do-not-apply-tso-segment-limit-to-non-tso-packets.patch
+alx-fix-alx_poll.patch
+enic-fix-rx-skb-checksum.patch
--- /dev/null
+From foo@baz Sat Jan 17 18:13:47 PST 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 1 Jan 2015 00:39:23 +1100
+Subject: tcp: Do not apply TSO segment limit to non-TSO packets
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 843925f33fcc293d80acf2c5c8a78adf3344d49b ]
+
+Thomas Jarosch reported IPsec TCP stalls when a PMTU event occurs.
+
+In fact the problem was completely unrelated to IPsec. The bug is
+also reproducible if you just disable TSO/GSO.
+
+The problem is that when the MSS goes down, existing queued packets
+on the TX queue that have not been transmitted yet all look like
+TSO packets and get treated as such.
+
+This then triggers a bug where tcp_mss_split_point tells us to
+generate a zero-sized packet on the TX queue. Once that happens
+we're screwed because the zero-sized packet can never be removed
+by ACKs.
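+
+A worked example with illustrative numbers (a sketch of the arithmetic,
+assuming the helper behavior summarized in the comments):
+
+  unsigned int len = 1448;                  /* skb queued at the old MSS */
+  unsigned int mss_now = 1400;              /* MSS after the PMTU event  */
+  unsigned int gso_max_segs = 0;            /* device does no TSO/GSO    */
+  unsigned int cwnd_quota = 10;             /* arbitrary positive value  */
+
+  unsigned int tso_segs = DIV_ROUND_UP(len, mss_now);     /* == 2, "TSO" */
+  unsigned int max_segs = min(cwnd_quota, gso_max_segs);  /* == 0        */
+  /* tcp_mss_split_point() then caps the send at roughly
+   * mss_now * max_segs == 0 bytes - the zero-sized packet described above.
+   */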
+
+Fixes: 1485348d242 ("tcp: Apply device TSO segment limit earlier")
+Reported-by: Thomas Jarosch <thomas.jarosch@intra2net.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1862,7 +1862,7 @@ static bool tcp_write_xmit(struct sock *
+ if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+ break;
+
+- if (tso_segs == 1) {
++ if (tso_segs == 1 || !sk->sk_gso_max_segs) {
+ if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
+ (tcp_skb_is_last(sk, skb) ?
+ nonagle : TCP_NAGLE_PUSH))))
+@@ -1899,7 +1899,7 @@ static bool tcp_write_xmit(struct sock *
+ }
+
+ limit = mss_now;
+- if (tso_segs > 1 && !tcp_urg_mode(tp))
++ if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
+ limit = tcp_mss_split_point(sk, skb, mss_now,
+ min_t(unsigned int,
+ cwnd_quota,
--- /dev/null
+From foo@baz Sat Jan 17 18:13:47 PST 2015
+From: Prashant Sreedharan <prashant@broadcom.com>
+Date: Sat, 20 Dec 2014 12:16:17 -0800
+Subject: tg3: tg3_disable_ints using uninitialized mailbox value to disable interrupts
+
+From: Prashant Sreedharan <prashant@broadcom.com>
+
+[ Upstream commit 05b0aa579397b734f127af58e401a30784a1e315 ]
+
+During driver load in tg3_init_one, if the driver detects DMA activity
+before initializing the chip, tg3_halt is called. As part of tg3_halt,
+interrupts are disabled using the routine tg3_disable_ints. This routine
+was using a mailbox value which was not initialized (the default value is
+0). As a result the driver was writing 0x00000001 to PCI config space
+register 0, which is the vendor id / device id.
+This driver bug was exposed by commit a7877b17a667 (PCI: Check only the
+Vendor ID to identify Configuration Request Retry). Also, this issue is
+only seen in older generation chipsets like the 5722, because a config
+space write to offset 0 from the driver is possible on them. The newer
+generation chips ignore writes to offset 0. Also, without commit
+a7877b17a667, when a GRC reset is issued on these older chips the
+bootcode would reprogram the vendor id / device id, which is the reason
+this bug was masked earlier.
+
+Fixed by initializing the interrupt mailbox registers before calling tg3_halt.
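+
+Illustrative sketch of the failure (simplified from the driver, so treat
+the exact helpers as approximate): the interrupt-disable path writes 1 to
+each vector's interrupt mailbox offset, which is still 0 at this point:
+
+  for (i = 0; i < tp->irq_max; i++)
+          /* tp->napi[i].int_mbox has not been set up yet, so it is 0 and
+           * the write lands on register offset 0 - reported above as the
+           * vendor id / device id in config space.
+           */
+          tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);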
+
+Please queue for -stable.
+
+Reported-by: Nils Holland <nholland@tisys.org>
+Reported-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Prashant Sreedharan <prashant@broadcom.com>
+Signed-off-by: Michael Chan <mchan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17389,23 +17389,6 @@ static int tg3_init_one(struct pci_dev *
+ goto err_out_apeunmap;
+ }
+
+- /*
+- * Reset chip in case UNDI or EFI driver did not shutdown
+- * DMA self test will enable WDMAC and we'll see (spurious)
+- * pending DMA on the PCI bus at that point.
+- */
+- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+- }
+-
+- err = tg3_test_dma(tp);
+- if (err) {
+- dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+- goto err_out_apeunmap;
+- }
+-
+ intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+ rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+ sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+@@ -17450,6 +17433,23 @@ static int tg3_init_one(struct pci_dev *
+ sndmbx += 0xc;
+ }
+
++ /*
++ * Reset chip in case UNDI or EFI driver did not shutdown
++ * DMA self test will enable WDMAC and we'll see (spurious)
++ * pending DMA on the PCI bus at that point.
++ */
++ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
++ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++ }
++
++ err = tg3_test_dma(tp);
++ if (err) {
++ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
++ goto err_out_apeunmap;
++ }
++
+ tg3_init_coal(tp);
+
+ pci_set_drvdata(pdev, dev);
--- /dev/null
+gre-fix-the-inner-mac-header-in-nbma-tunnel-xmit-path.patch
+netlink-always-copy-on-mmap-tx.patch
+netlink-don-t-reorder-loads-stores-before-marking-mmap-netlink-frame-as-available.patch
+in6-fix-conflict-with-glibc.patch
+tg3-tg3_disable_ints-using-uninitialized-mailbox-value-to-disable-interrupts.patch
+batman-adv-calculate-extra-tail-size-based-on-queued-fragments.patch
+batman-adv-unify-fragment-size-calculation.patch
+batman-adv-avoid-null-dereferences-and-fix-if-check.patch
+net-fix-stacked-vlan-offload-features-computation.patch
+net-reset-secmark-when-scrubbing-packet.patch
+tcp-do-not-apply-tso-segment-limit-to-non-tso-packets.patch
+alx-fix-alx_poll.patch
+team-avoid-possible-underflow-of-count_pending-value-for-notify_peers-and-mcast_rejoin.patch
+enic-fix-rx-skb-checksum.patch
+net-core-handle-csum-for-checksum_complete-vxlan-forwarding.patch
--- /dev/null
+gre-fix-the-inner-mac-header-in-nbma-tunnel-xmit-path.patch
+net-mlx4-cache-line-cqe-eqe-stride-fixes.patch
+netlink-always-copy-on-mmap-tx.patch
+netlink-don-t-reorder-loads-stores-before-marking-mmap-netlink-frame-as-available.patch
+geneve-remove-socket-and-offload-handlers-at-destruction.patch
+geneve-fix-races-between-socket-add-and-release.patch
+xen-netback-support-frontends-without-feature-rx-notify-again.patch
+net-drop-the-packet-when-fails-to-do-software-segmentation-or-header-check.patch
+in6-fix-conflict-with-glibc.patch
+tg3-tg3_disable_ints-using-uninitialized-mailbox-value-to-disable-interrupts.patch
+batman-adv-calculate-extra-tail-size-based-on-queued-fragments.patch
+batman-adv-unify-fragment-size-calculation.patch
+batman-adv-avoid-null-dereferences-and-fix-if-check.patch
+net-mlx4_en-doorbell-is-byteswapped-in-little-endian-archs.patch
+tcp6-don-t-move-ip6cb-before-xfrm6_policy_check.patch
+net-fix-stacked-vlan-offload-features-computation.patch
+net-reset-secmark-when-scrubbing-packet.patch
+net-core-handle-csum-for-checksum_complete-vxlan-forwarding.patch
+net-generalize-ndo_gso_check-to-ndo_features_check.patch
+net-mlx4_core-correcly-update-the-mtt-s-offset-in-the-mr-re-reg-flow.patch
+tcp-do-not-apply-tso-segment-limit-to-non-tso-packets.patch
+xen-netback-fixing-the-propagation-of-the-transmit-shaper-timeout.patch
+alx-fix-alx_poll.patch
+team-avoid-possible-underflow-of-count_pending-value-for-notify_peers-and-mcast_rejoin.patch
+enic-fix-rx-skb-checksum.patch