--- /dev/null
+From foo@baz Fri May 8 13:16:04 CEST 2015
+From: Alexei Starovoitov <ast@plumgrid.com>
+Date: Mon, 27 Apr 2015 14:40:37 -0700
+Subject: bpf: fix 64-bit divide
+
+From: Alexei Starovoitov <ast@plumgrid.com>
+
+[ Upstream commit 876a7ae65b86d8cec8efe7d15d050ac61116874e ]
+
+ALU64_DIV instruction should be dividing 64-bit by 64-bit,
+whereas do_div() does 64-bit by 32-bit divide.
+x64 and arm64 JITs correctly implement 64 by 64 unsigned divide.
+llvm BPF backend emits code assuming that ALU64_DIV does 64 by 64.
+
+Fixes: 89aa075832b0 ("net: sock: allow eBPF programs to be attached to sockets")
+Reported-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/core.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -357,8 +357,8 @@ select_insn:
+ ALU64_MOD_X:
+ if (unlikely(SRC == 0))
+ return 0;
+- tmp = DST;
+- DST = do_div(tmp, SRC);
++ div64_u64_rem(DST, SRC, &tmp);
++ DST = tmp;
+ CONT;
+ ALU_MOD_X:
+ if (unlikely(SRC == 0))
+@@ -367,8 +367,8 @@ select_insn:
+ DST = do_div(tmp, (u32) SRC);
+ CONT;
+ ALU64_MOD_K:
+- tmp = DST;
+- DST = do_div(tmp, IMM);
++ div64_u64_rem(DST, IMM, &tmp);
++ DST = tmp;
+ CONT;
+ ALU_MOD_K:
+ tmp = (u32) DST;
+@@ -377,7 +377,7 @@ select_insn:
+ ALU64_DIV_X:
+ if (unlikely(SRC == 0))
+ return 0;
+- do_div(DST, SRC);
++ DST = div64_u64(DST, SRC);
+ CONT;
+ ALU_DIV_X:
+ if (unlikely(SRC == 0))
+@@ -387,7 +387,7 @@ select_insn:
+ DST = (u32) tmp;
+ CONT;
+ ALU64_DIV_K:
+- do_div(DST, IMM);
++ DST = div64_u64(DST, IMM);
+ CONT;
+ ALU_DIV_K:
+ tmp = (u32) DST;
--- /dev/null
+From foo@baz Fri May 8 13:16:04 CEST 2015
+From: Hariprasad Shenai <hariprasad@chelsio.com>
+Date: Wed, 29 Apr 2015 17:19:05 +0530
+Subject: cxgb4: Fix MC1 memory offset calculation
+
+From: Hariprasad Shenai <hariprasad@chelsio.com>
+
+[ Upstream commit 7f0b8a56c978b0a3315ac84c6cbb065413afb8e9 ]
+
+Commit 6559a7e8296002b4 ("cxgb4: Cleanup macros so they follow the same
+style and look consistent") introduced a regression where reading MC1
+memory in adapters where MC0 isn't present or MC0 size is not equal to MC1
+size caused the adapter to crash due to incorrect computation of memoffset.
+The fix is to read the size of MC0 instead of MC1 for the offset calculation.
+
+Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, i
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
+- MA_EXT_MEMORY1_BAR_A));
++ MA_EXT_MEMORY0_BAR_A));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
--- /dev/null
+From foo@baz Fri May 8 13:16:04 CEST 2015
+From: "David S. Miller" <davem@davemloft.net>
+Date: Fri, 1 May 2015 22:02:47 -0400
+Subject: ipv4: Missing sk_nulls_node_init() in ping_unhash().
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit a134f083e79fb4c3d0a925691e732c56911b4326 ]
+
+If we don't do that, then the poison value is left in the ->pprev
+backlink.
+
+This can cause crashes if we do a disconnect, followed by a connect().
+
+Tested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: Wen Xu <hotdog3645@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ping.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
+ if (sk_hashed(sk)) {
+ write_lock_bh(&ping_table.lock);
+ hlist_nulls_del(&sk->sk_nulls_node);
++ sk_nulls_node_init(&sk->sk_nulls_node);
+ sock_put(sk);
+ isk->inet_num = 0;
+ isk->inet_sport = 0;
--- /dev/null
+From foo@baz Fri May 8 13:16:04 CEST 2015
+From: Benjamin Poirier <bpoirier@suse.de>
+Date: Tue, 28 Apr 2015 14:49:29 -0700
+Subject: mlx4: Fix tx ring affinity_mask creation
+
+From: Benjamin Poirier <bpoirier@suse.de>
+
+[ Upstream commit 42eab005a5dd5d7ea2b0328aecc4d6cc0c23c9c2 ]
+
+By default, the number of tx queues is limited by the number of online cpus
+in mlx4_en_get_profile(). However, this limit no longer holds after the
+ethtool .set_channels method has been called. In that situation, the driver
+may access invalid bits of certain cpumask variables when queue_index >=
+nr_cpu_ids.
+
+Signed-off-by: Benjamin Poirier <bpoirier@suse.de>
+Acked-by: Ido Shamay <idos@mellanox.com>
+Fixes: d03a68f ("net/mlx4_en: Configure the XPS queue mapping on driver load")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_e
+ ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ ring->queue_index = queue_index;
+
+- if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+- cpumask_set_cpu(queue_index, &ring->affinity_mask);
++ if (queue_index < priv->num_tx_rings_p_up)
++ cpumask_set_cpu_local_first(queue_index,
++ priv->mdev->dev->numa_node,
++ &ring->affinity_mask);
+
+ *pring = ring;
+ return 0;
+@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4
+
+ err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+ &ring->qp, &ring->qp_state);
+- if (!user_prio && cpu_online(ring->queue_index))
++ if (!cpumask_empty(&ring->affinity_mask))
+ netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ ring->queue_index);
+
--- /dev/null
+From foo@baz Fri May 8 13:16:04 CEST 2015
+From: Ido Shamay <idos@mellanox.com>
+Date: Thu, 30 Apr 2015 17:32:46 +0300
+Subject: net/mlx4_en: Schedule napi when RX buffers allocation fails
+
+From: Ido Shamay <idos@mellanox.com>
+
+[ Upstream commit 07841f9d94c11afe00c0498cf242edf4075729f4 ]
+
+When the system is out of memory, refilling of RX buffers fails while
+the driver continues to pass the received packets to the kernel stack.
+At some point, when all RX buffers deplete, the driver may fall into a
+sleep, and not recover when memory for new RX buffers is once again
+available. This is because the hardware does not have valid descriptors,
+so no interrupt will be generated for the driver to return to work
+in napi context. Fix it by scheduling the napi poll function from the
+stats_task delayed workqueue, as long as the allocations fail.
+
+Signed-off-by: Ido Shamay <idos@mellanox.com>
+Signed-off-by: Amir Vadai <amirv@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 26 +++++++++++++++++++++++--
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
+ 3 files changed, 26 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_ptp_overflow_check(mdev);
+
++ mlx4_en_recover_from_oom(priv);
+ queue_delayed_work(mdev->workqueue, &priv->service_task,
+ SERVICE_TASK_DELAY);
+ }
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struc
+ return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
+ }
+
++static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
++{
++ BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
++ return ring->prod == ring->cons;
++}
++
+ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+ {
+ *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct m
+ ring->cons, ring->prod);
+
+ /* Unmap and free Rx buffers */
+- BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
+- while (ring->cons != ring->prod) {
++ while (!mlx4_en_is_ring_empty(ring)) {
+ index = ring->cons & ring->size_mask;
+ en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+ mlx4_en_free_rx_desc(priv, ring, index);
+@@ -491,6 +496,23 @@ err_allocator:
+ return err;
+ }
+
++/* We recover from out of memory by scheduling our napi poll
++ * function (mlx4_en_process_cq), which tries to allocate
++ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
++ */
++void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
++{
++ int ring;
++
++ if (!priv->port_up)
++ return;
++
++ for (ring = 0; ring < priv->rx_ring_num; ring++) {
++ if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
++ napi_reschedule(&priv->rx_cq[ring]->napi);
++ }
++}
++
+ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride)
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -790,6 +790,7 @@ int mlx4_en_activate_tx_ring(struct mlx4
+ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring);
+ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
++void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
+ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring **pring,
+ u32 size, u16 stride, int node);
--- /dev/null
+From foo@baz Fri May 8 13:16:04 CEST 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 28 Apr 2015 11:43:15 +0800
+Subject: route: Use ipv4_mtu instead of raw rt_pmtu
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit cb6ccf09d6b94bec4def1ac5cf4678d12b216474 ]
+
+The commit 3cdaa5be9e81a914e633a6be7b7d2ef75b528562 ("ipv4: Don't
+increase PMTU with Datagram Too Big message") broke PMTU in cases
+where the rt_pmtu value has expired but is smaller than the new
+PMTU value.
+
+This obsolete rt_pmtu then prevents the new PMTU value from being
+installed.
+
+Fixes: 3cdaa5be9e81 ("ipv4: Don't increase PMTU with Datagram Too Big message")
+Reported-by: Gerd v. Egidy <gerd.von.egidy@intra2net.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -963,10 +963,7 @@ static void __ip_rt_update_pmtu(struct r
+ if (dst_metric_locked(dst, RTAX_MTU))
+ return;
+
+- if (dst->dev->mtu < mtu)
+- return;
+-
+- if (rt->rt_pmtu && rt->rt_pmtu < mtu)
++ if (ipv4_mtu(dst) < mtu)
+ return;
+
+ if (mtu < ip_rt_min_pmtu)