]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 6 Jul 2016 23:56:24 +0000 (16:56 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 6 Jul 2016 23:56:24 +0000 (16:56 -0700)
added patches:
ax.25-close-socket-connection-on-session-completion.patch
bpf-perf-delay-release-of-bpf-prog-after-grace-period.patch
bpf-try-harder-on-clones-when-writing-into-skb.patch
bridge-fix-ipv6-mc-snooping-if-bridge-has-no-ipv6-address.patch
crypto-user-re-add-size-check-for-crypto_msg_getalg.patch
crypto-ux500-memmove-the-right-size.patch
crypto-vmx-increase-priority-of-aes-cbc-cipher.patch
esp-fix-esn-generation-under-udp-encapsulation.patch
ipmr-ip6mr-initialize-the-last-assert-time-of-mfc-entries.patch
neigh-explicitly-declare-rcu-bh-read-side-critical-section-in-neigh_xmit.patch
net-alx-work-around-the-dma-rx-overflow-issue.patch
net-don-t-forget-pr_fmt-on-net_dbg_ratelimited-for-config_dynamic_debug.patch
net-macb-fix-default-configuration-for-gmac-on-at91.patch
net_sched-fix-pfifo_head_drop-behavior-vs-backlog.patch
netem-fix-a-use-after-free.patch
sit-correct-ip-protocol-used-in-ipip6_err.patch
sock_diag-do-not-broadcast-raw-socket-destruction.patch

17 files changed:
queue-4.4/ax.25-close-socket-connection-on-session-completion.patch [new file with mode: 0644]
queue-4.4/bpf-perf-delay-release-of-bpf-prog-after-grace-period.patch [new file with mode: 0644]
queue-4.4/bpf-try-harder-on-clones-when-writing-into-skb.patch [new file with mode: 0644]
queue-4.4/bridge-fix-ipv6-mc-snooping-if-bridge-has-no-ipv6-address.patch [new file with mode: 0644]
queue-4.4/crypto-user-re-add-size-check-for-crypto_msg_getalg.patch [new file with mode: 0644]
queue-4.4/crypto-ux500-memmove-the-right-size.patch [new file with mode: 0644]
queue-4.4/crypto-vmx-increase-priority-of-aes-cbc-cipher.patch [new file with mode: 0644]
queue-4.4/esp-fix-esn-generation-under-udp-encapsulation.patch [new file with mode: 0644]
queue-4.4/ipmr-ip6mr-initialize-the-last-assert-time-of-mfc-entries.patch [new file with mode: 0644]
queue-4.4/neigh-explicitly-declare-rcu-bh-read-side-critical-section-in-neigh_xmit.patch [new file with mode: 0644]
queue-4.4/net-alx-work-around-the-dma-rx-overflow-issue.patch [new file with mode: 0644]
queue-4.4/net-don-t-forget-pr_fmt-on-net_dbg_ratelimited-for-config_dynamic_debug.patch [new file with mode: 0644]
queue-4.4/net-macb-fix-default-configuration-for-gmac-on-at91.patch [new file with mode: 0644]
queue-4.4/net_sched-fix-pfifo_head_drop-behavior-vs-backlog.patch [new file with mode: 0644]
queue-4.4/netem-fix-a-use-after-free.patch [new file with mode: 0644]
queue-4.4/sit-correct-ip-protocol-used-in-ipip6_err.patch [new file with mode: 0644]
queue-4.4/sock_diag-do-not-broadcast-raw-socket-destruction.patch [new file with mode: 0644]

diff --git a/queue-4.4/ax.25-close-socket-connection-on-session-completion.patch b/queue-4.4/ax.25-close-socket-connection-on-session-completion.patch
new file mode 100644 (file)
index 0000000..256d8af
--- /dev/null
@@ -0,0 +1,107 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Basil Gunn <basil@pacabunga.com>
+Date: Thu, 16 Jun 2016 09:42:30 -0700
+Subject: AX.25: Close socket connection on session completion
+
+From: Basil Gunn <basil@pacabunga.com>
+
+[ Upstream commit 4a7d99ea1b27734558feb6833f180cd38a159940 ]
+
+A socket connection made in ax.25 is not closed when session is
+completed.  The heartbeat timer is stopped prematurely and this is
+where the socket gets closed. Allow heartbeat timer to run to close
+socket. Symptom occurs in kernels >= 4.2.0
+
+Originally sent 6/15/2016. Resend with distribution list matching
+scripts/maintainer.pl output.
+
+Signed-off-by: Basil Gunn <basil@pacabunga.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ax25/af_ax25.c        |    3 ++-
+ net/ax25/ax25_ds_timer.c  |    5 ++++-
+ net/ax25/ax25_std_timer.c |    5 ++++-
+ net/ax25/ax25_subr.c      |    3 ++-
+ 4 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -976,7 +976,8 @@ static int ax25_release(struct socket *s
+                       release_sock(sk);
+                       ax25_disconnect(ax25, 0);
+                       lock_sock(sk);
+-                      ax25_destroy_socket(ax25);
++                      if (!sock_flag(ax25->sk, SOCK_DESTROY))
++                              ax25_destroy_socket(ax25);
+                       break;
+               case AX25_STATE_3:
+--- a/net/ax25/ax25_ds_timer.c
++++ b/net/ax25/ax25_ds_timer.c
+@@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *a
+       switch (ax25->state) {
+       case AX25_STATE_0:
++      case AX25_STATE_2:
+               /* Magic here: If we listen() and a new link dies before it
+                  is accepted() it isn't 'dead' so doesn't get removed. */
+               if (!sk || sock_flag(sk, SOCK_DESTROY) ||
+@@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *a
+                               sock_hold(sk);
+                               ax25_destroy_socket(ax25);
+                               bh_unlock_sock(sk);
++                              /* Ungrab socket and destroy it */
+                               sock_put(sk);
+                       } else
+                               ax25_destroy_socket(ax25);
+@@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
+       case AX25_STATE_2:
+               if (ax25->n2count == ax25->n2) {
+                       ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
+-                      ax25_disconnect(ax25, ETIMEDOUT);
++                      if (!sock_flag(ax25->sk, SOCK_DESTROY))
++                              ax25_disconnect(ax25, ETIMEDOUT);
+                       return;
+               } else {
+                       ax25->n2count++;
+--- a/net/ax25/ax25_std_timer.c
++++ b/net/ax25/ax25_std_timer.c
+@@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *
+       switch (ax25->state) {
+       case AX25_STATE_0:
++      case AX25_STATE_2:
+               /* Magic here: If we listen() and a new link dies before it
+                  is accepted() it isn't 'dead' so doesn't get removed. */
+               if (!sk || sock_flag(sk, SOCK_DESTROY) ||
+@@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *
+                               sock_hold(sk);
+                               ax25_destroy_socket(ax25);
+                               bh_unlock_sock(sk);
++                              /* Ungrab socket and destroy it */
+                               sock_put(sk);
+                       } else
+                               ax25_destroy_socket(ax25);
+@@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax
+       case AX25_STATE_2:
+               if (ax25->n2count == ax25->n2) {
+                       ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
+-                      ax25_disconnect(ax25, ETIMEDOUT);
++                      if (!sock_flag(ax25->sk, SOCK_DESTROY))
++                              ax25_disconnect(ax25, ETIMEDOUT);
+                       return;
+               } else {
+                       ax25->n2count++;
+--- a/net/ax25/ax25_subr.c
++++ b/net/ax25/ax25_subr.c
+@@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int
+ {
+       ax25_clear_queues(ax25);
+-      ax25_stop_heartbeat(ax25);
++      if (!sock_flag(ax25->sk, SOCK_DESTROY))
++              ax25_stop_heartbeat(ax25);
+       ax25_stop_t1timer(ax25);
+       ax25_stop_t2timer(ax25);
+       ax25_stop_t3timer(ax25);
diff --git a/queue-4.4/bpf-perf-delay-release-of-bpf-prog-after-grace-period.patch b/queue-4.4/bpf-perf-delay-release-of-bpf-prog-after-grace-period.patch
new file mode 100644 (file)
index 0000000..4534a12
--- /dev/null
@@ -0,0 +1,53 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Mon, 27 Jun 2016 21:38:11 +0200
+Subject: bpf, perf: delay release of BPF prog after grace period
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit ceb56070359b7329b5678b5d95a376fcb24767be ]
+
+Commit dead9f29ddcc ("perf: Fix race in BPF program unregister") moved
+destruction of BPF program from free_event_rcu() callback to __free_event(),
+which is problematic if used with tail calls: if prog A is attached as
+trace event directly, but at the same time present in a tail call map used
+by another trace event program elsewhere, then we need to delay destruction
+via RCU grace period since it can still be in use by the program doing the
+tail call (the prog first needs to be dropped from the tail call map, then
+trace event with prog A attached destroyed, so we get immediate destruction).
+
+Fixes: dead9f29ddcc ("perf: Fix race in BPF program unregister")
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Cc: Jann Horn <jann@thejh.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h  |    4 ++++
+ kernel/events/core.c |    2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -198,6 +198,10 @@ static inline struct bpf_prog *bpf_prog_
+ static inline void bpf_prog_put(struct bpf_prog *prog)
+ {
+ }
++
++static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
++{
++}
+ #endif /* CONFIG_BPF_SYSCALL */
+ /* verifier prototypes for helper functions called from eBPF programs */
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7101,7 +7101,7 @@ static void perf_event_free_bpf_prog(str
+       prog = event->tp_event->prog;
+       if (prog) {
+               event->tp_event->prog = NULL;
+-              bpf_prog_put(prog);
++              bpf_prog_put_rcu(prog);
+       }
+ }
diff --git a/queue-4.4/bpf-try-harder-on-clones-when-writing-into-skb.patch b/queue-4.4/bpf-try-harder-on-clones-when-writing-into-skb.patch
new file mode 100644 (file)
index 0000000..062d221
--- /dev/null
@@ -0,0 +1,161 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 19 Feb 2016 23:05:25 +0100
+Subject: bpf: try harder on clones when writing into skb
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 3697649ff29e0f647565eed04b27a7779c646a22 ]
+
+When we're dealing with clones and the area is not writeable, try
+harder and get a copy via pskb_expand_head(). Replace also other
+occurrences in tc actions with the new skb_try_make_writable().
+
+Reported-by: Ashhad Sheikh <ashhadsheikh394@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h |    7 +++++++
+ net/core/filter.c      |   18 ++++++++++--------
+ net/sched/act_csum.c   |    8 ++------
+ net/sched/act_nat.c    |   18 +++++-------------
+ 4 files changed, 24 insertions(+), 27 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2564,6 +2564,13 @@ static inline int skb_clone_writable(con
+              skb_headroom(skb) + len <= skb->hdr_len;
+ }
++static inline int skb_try_make_writable(struct sk_buff *skb,
++                                      unsigned int write_len)
++{
++      return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
++             pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
++}
++
+ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+                           int cloned)
+ {
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1275,9 +1275,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u
+        */
+       if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
+               return -EFAULT;
+-
+-      if (unlikely(skb_cloned(skb) &&
+-                   !skb_clone_writable(skb, offset + len)))
++      if (unlikely(skb_try_make_writable(skb, offset + len)))
+               return -EFAULT;
+       ptr = skb_header_pointer(skb, offset, len, buf);
+@@ -1321,8 +1319,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u
+       if (unlikely((u32) offset > 0xffff))
+               return -EFAULT;
+-      if (unlikely(skb_cloned(skb) &&
+-                   !skb_clone_writable(skb, offset + sizeof(sum))))
++      if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+               return -EFAULT;
+       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+@@ -1367,9 +1364,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u
+       if (unlikely((u32) offset > 0xffff))
+               return -EFAULT;
+-
+-      if (unlikely(skb_cloned(skb) &&
+-                   !skb_clone_writable(skb, offset + sizeof(sum))))
++      if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+               return -EFAULT;
+       ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+@@ -1554,6 +1549,13 @@ bool bpf_helper_changes_skb_data(void *f
+               return true;
+       if (func == bpf_skb_vlan_pop)
+               return true;
++      if (func == bpf_skb_store_bytes)
++              return true;
++      if (func == bpf_l3_csum_replace)
++              return true;
++      if (func == bpf_l4_csum_replace)
++              return true;
++
+       return false;
+ }
+--- a/net/sched/act_csum.c
++++ b/net/sched/act_csum.c
+@@ -105,9 +105,7 @@ static void *tcf_csum_skb_nextlayer(stru
+       int hl = ihl + jhl;
+       if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
+-          (skb_cloned(skb) &&
+-           !skb_clone_writable(skb, hl + ntkoff) &&
+-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++          skb_try_make_writable(skb, hl + ntkoff))
+               return NULL;
+       else
+               return (void *)(skb_network_header(skb) + ihl);
+@@ -365,9 +363,7 @@ static int tcf_csum_ipv4(struct sk_buff
+       }
+       if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
+-              if (skb_cloned(skb) &&
+-                  !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
+-                  pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++              if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
+                       goto fail;
+               ip_send_check(ip_hdr(skb));
+--- a/net/sched/act_nat.c
++++ b/net/sched/act_nat.c
+@@ -126,9 +126,7 @@ static int tcf_nat(struct sk_buff *skb,
+               addr = iph->daddr;
+       if (!((old_addr ^ addr) & mask)) {
+-              if (skb_cloned(skb) &&
+-                  !skb_clone_writable(skb, sizeof(*iph) + noff) &&
+-                  pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++              if (skb_try_make_writable(skb, sizeof(*iph) + noff))
+                       goto drop;
+               new_addr &= mask;
+@@ -156,9 +154,7 @@ static int tcf_nat(struct sk_buff *skb,
+               struct tcphdr *tcph;
+               if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
+-                  (skb_cloned(skb) &&
+-                   !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
+-                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++                  skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
+                       goto drop;
+               tcph = (void *)(skb_network_header(skb) + ihl);
+@@ -171,9 +167,7 @@ static int tcf_nat(struct sk_buff *skb,
+               struct udphdr *udph;
+               if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
+-                  (skb_cloned(skb) &&
+-                   !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
+-                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++                  skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
+                       goto drop;
+               udph = (void *)(skb_network_header(skb) + ihl);
+@@ -213,10 +207,8 @@ static int tcf_nat(struct sk_buff *skb,
+               if ((old_addr ^ addr) & mask)
+                       break;
+-              if (skb_cloned(skb) &&
+-                  !skb_clone_writable(skb, ihl + sizeof(*icmph) +
+-                                           sizeof(*iph) + noff) &&
+-                  pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++              if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
++                                        sizeof(*iph) + noff))
+                       goto drop;
+               icmph = (void *)(skb_network_header(skb) + ihl);
diff --git a/queue-4.4/bridge-fix-ipv6-mc-snooping-if-bridge-has-no-ipv6-address.patch b/queue-4.4/bridge-fix-ipv6-mc-snooping-if-bridge-has-no-ipv6-address.patch
new file mode 100644 (file)
index 0000000..0c5013f
--- /dev/null
@@ -0,0 +1,118 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: daniel <daniel@dd-wrt.com>
+Date: Fri, 24 Jun 2016 12:35:18 +0200
+Subject: Bridge: Fix ipv6 mc snooping if bridge has no ipv6 address
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: daniel <daniel@dd-wrt.com>
+
+[ Upstream commit 0888d5f3c0f183ea6177355752ada433d370ac89 ]
+
+The bridge is falsely dropping ipv6 multicast packets if there is:
+ 1. No ipv6 address assigned on the bridge.
+ 2. No external mld querier present.
+ 3. The internal querier enabled.
+
+When the bridge fails to build mld queries, because it has no
+ipv6 address, it silently returns, but keeps the local querier enabled.
+This specific case causes confusing packet loss.
+
+Ipv6 multicast snooping can only work if:
+ a) An external querier is present
+ OR
+ b) The bridge has an ipv6 address and is capable of sending own queries
+
+Otherwise it has to forward/flood the ipv6 multicast traffic,
+because snooping cannot work.
+
+This patch fixes the issue by adding a flag to the bridge struct that
+indicates that there is currently no ipv6 address assigned to the bridge
+and returns a false state for the local querier in
+__br_multicast_querier_exists().
+
+Special thanks to Linus Lüssing.
+
+Fixes: d1d81d4c3dd8 ("bridge: check return value of ipv6_dev_get_saddr()")
+Signed-off-by: Daniel Danzberger <daniel@dd-wrt.com>
+Acked-by: Linus Lüssing <linus.luessing@c0d3.blue>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_multicast.c |    4 ++++
+ net/bridge/br_private.h   |   23 +++++++++++++++++++----
+ 2 files changed, 23 insertions(+), 4 deletions(-)
+
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_
+       if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+                              &ip6h->saddr)) {
+               kfree_skb(skb);
++              br->has_ipv6_addr = 0;
+               return NULL;
+       }
++
++      br->has_ipv6_addr = 1;
+       ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+       hopopt = (u8 *)(ip6h + 1);
+@@ -1736,6 +1739,7 @@ void br_multicast_init(struct net_bridge
+       br->ip6_other_query.delay_time = 0;
+       br->ip6_querier.port = NULL;
+ #endif
++      br->has_ipv6_addr = 1;
+       spin_lock_init(&br->multicast_lock);
+       setup_timer(&br->multicast_router_timer,
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -301,6 +301,7 @@ struct net_bridge
+       u8                              multicast_disabled:1;
+       u8                              multicast_querier:1;
+       u8                              multicast_query_use_ifaddr:1;
++      u8                              has_ipv6_addr:1;
+       u32                             hash_elasticity;
+       u32                             hash_max;
+@@ -574,10 +575,22 @@ static inline bool br_multicast_is_route
+ static inline bool
+ __br_multicast_querier_exists(struct net_bridge *br,
+-                            struct bridge_mcast_other_query *querier)
++                              struct bridge_mcast_other_query *querier,
++                              const bool is_ipv6)
+ {
++      bool own_querier_enabled;
++
++      if (br->multicast_querier) {
++              if (is_ipv6 && !br->has_ipv6_addr)
++                      own_querier_enabled = false;
++              else
++                      own_querier_enabled = true;
++      } else {
++              own_querier_enabled = false;
++      }
++
+       return time_is_before_jiffies(querier->delay_time) &&
+-             (br->multicast_querier || timer_pending(&querier->timer));
++             (own_querier_enabled || timer_pending(&querier->timer));
+ }
+ static inline bool br_multicast_querier_exists(struct net_bridge *br,
+@@ -585,10 +598,12 @@ static inline bool br_multicast_querier_
+ {
+       switch (eth->h_proto) {
+       case (htons(ETH_P_IP)):
+-              return __br_multicast_querier_exists(br, &br->ip4_other_query);
++              return __br_multicast_querier_exists(br,
++                      &br->ip4_other_query, false);
+ #if IS_ENABLED(CONFIG_IPV6)
+       case (htons(ETH_P_IPV6)):
+-              return __br_multicast_querier_exists(br, &br->ip6_other_query);
++              return __br_multicast_querier_exists(br,
++                      &br->ip6_other_query, true);
+ #endif
+       default:
+               return false;
diff --git a/queue-4.4/crypto-user-re-add-size-check-for-crypto_msg_getalg.patch b/queue-4.4/crypto-user-re-add-size-check-for-crypto_msg_getalg.patch
new file mode 100644 (file)
index 0000000..aa80378
--- /dev/null
@@ -0,0 +1,39 @@
+From 055ddaace03580455a7b7dbea8e93d62acee61fc Mon Sep 17 00:00:00 2001
+From: Mathias Krause <minipli@googlemail.com>
+Date: Wed, 22 Jun 2016 20:29:37 +0200
+Subject: crypto: user - re-add size check for CRYPTO_MSG_GETALG
+
+From: Mathias Krause <minipli@googlemail.com>
+
+commit 055ddaace03580455a7b7dbea8e93d62acee61fc upstream.
+
+Commit 9aa867e46565 ("crypto: user - Add CRYPTO_MSG_DELRNG")
+accidentally removed the minimum size check for CRYPTO_MSG_GETALG
+netlink messages. This allows userland to send a truncated
+CRYPTO_MSG_GETALG message as short as a netlink header only making
+crypto_report() operate on uninitialized memory by accessing data
+beyond the end of the netlink message.
+
+Fix this be re-adding the minimum required size of CRYPTO_MSG_GETALG
+messages to the crypto_msg_min[] array.
+
+Fixes: 9aa867e46565 ("crypto: user - Add CRYPTO_MSG_DELRNG")
+Signed-off-by: Mathias Krause <minipli@googlemail.com>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/crypto_user.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -455,6 +455,7 @@ static const int crypto_msg_min[CRYPTO_N
+       [CRYPTO_MSG_NEWALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_DELALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_UPDATEALG   - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
++      [CRYPTO_MSG_GETALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_DELRNG      - CRYPTO_MSG_BASE] = 0,
+ };
diff --git a/queue-4.4/crypto-ux500-memmove-the-right-size.patch b/queue-4.4/crypto-ux500-memmove-the-right-size.patch
new file mode 100644 (file)
index 0000000..cf9c9ad
--- /dev/null
@@ -0,0 +1,43 @@
+From 19ced623db2fe91604d69f7d86b03144c5107739 Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Wed, 8 Jun 2016 14:56:39 +0200
+Subject: crypto: ux500 - memmove the right size
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit 19ced623db2fe91604d69f7d86b03144c5107739 upstream.
+
+The hash buffer is really HASH_BLOCK_SIZE bytes, someone
+must have thought that memmove takes n*u32 words by mistake.
+Tests work as good/bad as before after this patch.
+
+Cc: Joakim Bech <joakim.bech@linaro.org>
+Reported-by: David Binderman <linuxdev.baldrick@gmail.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ux500/hash/hash_core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -797,7 +797,7 @@ static int hash_process_data(struct hash
+                                               &device_data->state);
+                               memmove(req_ctx->state.buffer,
+                                       device_data->state.buffer,
+-                                      HASH_BLOCK_SIZE / sizeof(u32));
++                                      HASH_BLOCK_SIZE);
+                               if (ret) {
+                                       dev_err(device_data->dev,
+                                               "%s: hash_resume_state() failed!\n",
+@@ -848,7 +848,7 @@ static int hash_process_data(struct hash
+                       memmove(device_data->state.buffer,
+                               req_ctx->state.buffer,
+-                              HASH_BLOCK_SIZE / sizeof(u32));
++                              HASH_BLOCK_SIZE);
+                       if (ret) {
+                               dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
+                                       __func__);
diff --git a/queue-4.4/crypto-vmx-increase-priority-of-aes-cbc-cipher.patch b/queue-4.4/crypto-vmx-increase-priority-of-aes-cbc-cipher.patch
new file mode 100644 (file)
index 0000000..3db271a
--- /dev/null
@@ -0,0 +1,57 @@
+From 12d3f49e1ffbbf8cbbb60acae5a21103c5c841ac Mon Sep 17 00:00:00 2001
+From: Anton Blanchard <anton@samba.org>
+Date: Fri, 10 Jun 2016 16:47:03 +1000
+Subject: crypto: vmx - Increase priority of aes-cbc cipher
+
+From: Anton Blanchard <anton@samba.org>
+
+commit 12d3f49e1ffbbf8cbbb60acae5a21103c5c841ac upstream.
+
+All of the VMX AES ciphers (AES, AES-CBC and AES-CTR) are set at
+priority 1000. Unfortunately this means we never use AES-CBC and
+AES-CTR, because the base AES-CBC cipher that is implemented on
+top of AES inherits its priority.
+
+To fix this, AES-CBC and AES-CTR have to be a higher priority. Set
+them to 2000.
+
+Testing on a POWER8 with:
+
+cryptsetup benchmark --cipher aes --key-size 256
+
+Shows decryption speed increase from 402.4 MB/s to 3069.2 MB/s,
+over 7x faster. Thanks to Mike Strosaker for helping me debug
+this issue.
+
+Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module")
+Signed-off-by: Anton Blanchard <anton@samba.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/vmx/aes_cbc.c |    2 +-
+ drivers/crypto/vmx/aes_ctr.c |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -182,7 +182,7 @@ struct crypto_alg p8_aes_cbc_alg = {
+       .cra_name = "cbc(aes)",
+       .cra_driver_name = "p8_aes_cbc",
+       .cra_module = THIS_MODULE,
+-      .cra_priority = 1000,
++      .cra_priority = 2000,
+       .cra_type = &crypto_blkcipher_type,
+       .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+       .cra_alignmask = 0,
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -166,7 +166,7 @@ struct crypto_alg p8_aes_ctr_alg = {
+       .cra_name = "ctr(aes)",
+       .cra_driver_name = "p8_aes_ctr",
+       .cra_module = THIS_MODULE,
+-      .cra_priority = 1000,
++      .cra_priority = 2000,
+       .cra_type = &crypto_blkcipher_type,
+       .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+       .cra_alignmask = 0,
diff --git a/queue-4.4/esp-fix-esn-generation-under-udp-encapsulation.patch b/queue-4.4/esp-fix-esn-generation-under-udp-encapsulation.patch
new file mode 100644 (file)
index 0000000..88a3ec9
--- /dev/null
@@ -0,0 +1,166 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sat, 18 Jun 2016 13:03:36 +0800
+Subject: esp: Fix ESN generation under UDP encapsulation
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 962fcef33b03395051367181a0549d29d109d9a4 ]
+
+Blair Steven noticed that ESN in conjunction with UDP encapsulation
+is broken because we set the temporary ESP header to the wrong spot.
+
+This patch fixes this by first of all using the right spot, i.e.,
+4 bytes off the real ESP header, and then saving this information
+so that after encryption we can restore it properly.
+
+Fixes: 7021b2e1cddd ("esp4: Switch to new AEAD interface")
+Reported-by: Blair Steven <Blair.Steven@alliedtelesis.co.nz>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/esp4.c |   52 ++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 32 insertions(+), 20 deletions(-)
+
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -23,6 +23,11 @@ struct esp_skb_cb {
+       void *tmp;
+ };
++struct esp_output_extra {
++      __be32 seqhi;
++      u32 esphoff;
++};
++
+ #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
+@@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_stat
+  *
+  * TODO: Use spare space in skb for this where possible.
+  */
+-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
++static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
+ {
+       unsigned int len;
+-      len = seqhilen;
++      len = extralen;
+       len += crypto_aead_ivsize(aead);
+@@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto
+       return kmalloc(len, GFP_ATOMIC);
+ }
+-static inline __be32 *esp_tmp_seqhi(void *tmp)
++static inline void *esp_tmp_extra(void *tmp)
+ {
+-      return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
++      return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
+ }
+-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
++
++static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
+ {
+       return crypto_aead_ivsize(aead) ?
+-             PTR_ALIGN((u8 *)tmp + seqhilen,
+-                       crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
++             PTR_ALIGN((u8 *)tmp + extralen,
++                       crypto_aead_alignmask(aead) + 1) : tmp + extralen;
+ }
+ static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+@@ -99,7 +105,7 @@ static void esp_restore_header(struct sk
+ {
+       struct ip_esp_hdr *esph = (void *)(skb->data + offset);
+       void *tmp = ESP_SKB_CB(skb)->tmp;
+-      __be32 *seqhi = esp_tmp_seqhi(tmp);
++      __be32 *seqhi = esp_tmp_extra(tmp);
+       esph->seq_no = esph->spi;
+       esph->spi = *seqhi;
+@@ -107,7 +113,11 @@ static void esp_restore_header(struct sk
+ static void esp_output_restore_header(struct sk_buff *skb)
+ {
+-      esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
++      void *tmp = ESP_SKB_CB(skb)->tmp;
++      struct esp_output_extra *extra = esp_tmp_extra(tmp);
++
++      esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
++                              sizeof(__be32));
+ }
+ static void esp_output_done_esn(struct crypto_async_request *base, int err)
+@@ -121,6 +131,7 @@ static void esp_output_done_esn(struct c
+ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+       int err;
++      struct esp_output_extra *extra;
+       struct ip_esp_hdr *esph;
+       struct crypto_aead *aead;
+       struct aead_request *req;
+@@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state
+       int tfclen;
+       int nfrags;
+       int assoclen;
+-      int seqhilen;
+-      __be32 *seqhi;
++      int extralen;
+       __be64 seqno;
+       /* skb is pure payload to encrypt */
+@@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state
+       nfrags = err;
+       assoclen = sizeof(*esph);
+-      seqhilen = 0;
++      extralen = 0;
+       if (x->props.flags & XFRM_STATE_ESN) {
+-              seqhilen += sizeof(__be32);
+-              assoclen += seqhilen;
++              extralen += sizeof(*extra);
++              assoclen += sizeof(__be32);
+       }
+-      tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
++      tmp = esp_alloc_tmp(aead, nfrags, extralen);
+       if (!tmp) {
+               err = -ENOMEM;
+               goto error;
+       }
+-      seqhi = esp_tmp_seqhi(tmp);
+-      iv = esp_tmp_iv(aead, tmp, seqhilen);
++      extra = esp_tmp_extra(tmp);
++      iv = esp_tmp_iv(aead, tmp, extralen);
+       req = esp_tmp_req(aead, iv);
+       sg = esp_req_sg(aead, req);
+@@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state
+        * encryption.
+        */
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+-              esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
+-              *seqhi = esph->spi;
++              extra->esphoff = (unsigned char *)esph -
++                               skb_transport_header(skb);
++              esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
++              extra->seqhi = esph->spi;
+               esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+               aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+       }
+@@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *
+               goto out;
+       ESP_SKB_CB(skb)->tmp = tmp;
+-      seqhi = esp_tmp_seqhi(tmp);
++      seqhi = esp_tmp_extra(tmp);
+       iv = esp_tmp_iv(aead, tmp, seqhilen);
+       req = esp_tmp_req(aead, iv);
+       sg = esp_req_sg(aead, req);
diff --git a/queue-4.4/ipmr-ip6mr-initialize-the-last-assert-time-of-mfc-entries.patch b/queue-4.4/ipmr-ip6mr-initialize-the-last-assert-time-of-mfc-entries.patch
new file mode 100644 (file)
index 0000000..441537f
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Tom Goff <thomas.goff@ll.mit.edu>
+Date: Thu, 23 Jun 2016 16:11:57 -0400
+Subject: ipmr/ip6mr: Initialize the last assert time of mfc entries.
+
+From: Tom Goff <thomas.goff@ll.mit.edu>
+
+[ Upstream commit 70a0dec45174c976c64b4c8c1d0898581f759948 ]
+
+This fixes wrong-interface signaling on 32-bit platforms for entries
+created when jiffies > 2^31 + MFC_ASSERT_THRESH.
+
+Signed-off-by: Tom Goff <thomas.goff@ll.mit.edu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ipmr.c  |    4 +++-
+ net/ipv6/ip6mr.c |    1 +
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -882,8 +882,10 @@ static struct mfc_cache *ipmr_cache_allo
+ {
+       struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
+-      if (c)
++      if (c) {
++              c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
+               c->mfc_un.res.minvif = MAXVIFS;
++      }
+       return c;
+ }
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_al
+       struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
+       if (!c)
+               return NULL;
++      c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
+       c->mfc_un.res.minvif = MAXMIFS;
+       return c;
+ }
diff --git a/queue-4.4/neigh-explicitly-declare-rcu-bh-read-side-critical-section-in-neigh_xmit.patch b/queue-4.4/neigh-explicitly-declare-rcu-bh-read-side-critical-section-in-neigh_xmit.patch
new file mode 100644 (file)
index 0000000..500f5ed
--- /dev/null
@@ -0,0 +1,67 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: David Barroso <dbarroso@fastly.com>
+Date: Tue, 28 Jun 2016 11:16:43 +0300
+Subject: neigh: Explicitly declare RCU-bh read side critical section in neigh_xmit()
+
+From: David Barroso <dbarroso@fastly.com>
+
+[ Upstream commit b560f03ddfb072bca65e9440ff0dc4f9b1d1f056 ]
+
+neigh_xmit() expects to be called inside an RCU-bh read side critical
+section, and while one of its two current callers gets this right, the
+other one doesn't.
+
+More specifically, neigh_xmit() has two callers, mpls_forward() and
+mpls_output(), and while both callers call neigh_xmit() under
+rcu_read_lock(), this provides sufficient protection for neigh_xmit()
+only in the case of mpls_forward(), as that is always called from
+softirq context and therefore doesn't need explicit BH protection,
+while mpls_output() can be called from process context with softirqs
+enabled.
+
+When mpls_output() is called from process context, with softirqs
+enabled, we can be preempted by a softirq at any time, and RCU-bh
+considers the completion of a softirq as signaling the end of any
+pending read-side critical sections, so if we do get a softirq
+while we are in the part of neigh_xmit() that expects to be run inside
+an RCU-bh read side critical section, we can end up with an unexpected
+RCU grace period running right in the middle of that critical section,
+making things go boom.
+
+This patch fixes this impedance mismatch in the callee, by making
+neigh_xmit() always take rcu_read_{,un}lock_bh() around the code that
+expects to be treated as an RCU-bh read side critical section, as this
+seems a safer option than fixing it in the callers.
+
+Fixes: 4fd3d7d9e868f ("neigh: Add helper function neigh_xmit")
+Signed-off-by: David Barroso <dbarroso@fastly.com>
+Signed-off-by: Lennert Buytenhek <lbuytenhek@fastly.com>
+Acked-by: David Ahern <dsa@cumulusnetworks.com>
+Acked-by: Robert Shearman <rshearma@brocade.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/neighbour.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2467,13 +2467,17 @@ int neigh_xmit(int index, struct net_dev
+               tbl = neigh_tables[index];
+               if (!tbl)
+                       goto out;
++              rcu_read_lock_bh();
+               neigh = __neigh_lookup_noref(tbl, addr, dev);
+               if (!neigh)
+                       neigh = __neigh_create(tbl, addr, dev, false);
+               err = PTR_ERR(neigh);
+-              if (IS_ERR(neigh))
++              if (IS_ERR(neigh)) {
++                      rcu_read_unlock_bh();
+                       goto out_kfree_skb;
++              }
+               err = neigh->output(neigh, skb);
++              rcu_read_unlock_bh();
+       }
+       else if (index == NEIGH_LINK_TABLE) {
+               err = dev_hard_header(skb, dev, ntohs(skb->protocol),
diff --git a/queue-4.4/net-alx-work-around-the-dma-rx-overflow-issue.patch b/queue-4.4/net-alx-work-around-the-dma-rx-overflow-issue.patch
new file mode 100644 (file)
index 0000000..d5d8897
--- /dev/null
@@ -0,0 +1,50 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Feng Tang <feng.tang@intel.com>
+Date: Fri, 24 Jun 2016 15:26:05 +0800
+Subject: net: alx: Work around the DMA RX overflow issue
+
+From: Feng Tang <feng.tang@intel.com>
+
+[ Upstream commit 881d0327db37ad917a367c77aff1afa1ee41e0a9 ]
+
+Note: This is a verified backported patch for stable 4.4 kernel, and it
+could also be applied to 4.3/4.2/4.1/3.18/3.16
+
+There is a problem with alx devices, that the network link will be
+lost in 1-5 minutes after the device is up.
+
+>From debugging without datasheet, we found the error always
+happen when the DMA RX address is set to 0x....fc0, which is very
+likely to be a HW/silicon problem.
+
+This patch will apply rx skb with 64 bytes longer space, and if the
+allocated skb has a 0x...fc0 address, it will use skb_resever(skb, 64)
+to advance the address, so that the RX overflow can be avoided.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=70761
+Signed-off-by: Feng Tang <feng.tang@intel.com>
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Ole Lukoie <olelukoie@mail.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/atheros/alx/main.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx
+       while (!cur_buf->skb && next != rxq->read_idx) {
+               struct alx_rfd *rfd = &rxq->rfd[cur];
+-              skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
++              skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
+               if (!skb)
+                       break;
++
++              /* Workround for the HW RX DMA overflow issue */
++              if (((unsigned long)skb->data & 0xfff) == 0xfc0)
++                      skb_reserve(skb, 64);
++
+               dma = dma_map_single(&alx->hw.pdev->dev,
+                                    skb->data, alx->rxbuf_size,
+                                    DMA_FROM_DEVICE);
diff --git a/queue-4.4/net-don-t-forget-pr_fmt-on-net_dbg_ratelimited-for-config_dynamic_debug.patch b/queue-4.4/net-don-t-forget-pr_fmt-on-net_dbg_ratelimited-for-config_dynamic_debug.patch
new file mode 100644 (file)
index 0000000..dcaf466
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Wed, 15 Jun 2016 11:14:53 +0200
+Subject: net: Don't forget pr_fmt on net_dbg_ratelimited for CONFIG_DYNAMIC_DEBUG
+
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+
+[ Upstream commit daddef76c3deaaa7922f9d7b18edbf0a061215c3 ]
+
+The implementation of net_dbg_ratelimited in the CONFIG_DYNAMIC_DEBUG
+case was added with 2c94b5373 ("net: Implement net_dbg_ratelimited() for
+CONFIG_DYNAMIC_DEBUG case"). The implementation strategy was to take the
+usual definition of the dynamic_pr_debug macro, but alter it by adding a
+call to "net_ratelimit()" in the if statement. This is, in fact, the
+correct approach.
+
+However, while doing this, the author of the commit forgot to surround
+fmt by pr_fmt, resulting in unprefixed log messages appearing in the
+console. So, this commit adds back the pr_fmt(fmt) invocation, making
+net_dbg_ratelimited properly consistent across DEBUG, no DEBUG, and
+DYNAMIC_DEBUG cases, and bringing parity with the behavior of
+dynamic_pr_debug as well.
+
+Fixes: 2c94b5373 ("net: Implement net_dbg_ratelimited() for CONFIG_DYNAMIC_DEBUG case")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Cc: Tim Bingham <tbingham@akamai.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/net.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -251,7 +251,8 @@ do {                                                                       \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
+       if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
+           net_ratelimit())                                            \
+-              __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);    \
++              __dynamic_pr_debug(&descriptor, pr_fmt(fmt),            \
++                                 ##__VA_ARGS__);                      \
+ } while (0)
+ #elif defined(DEBUG)
+ #define net_dbg_ratelimited(fmt, ...)                         \
diff --git a/queue-4.4/net-macb-fix-default-configuration-for-gmac-on-at91.patch b/queue-4.4/net-macb-fix-default-configuration-for-gmac-on-at91.patch
new file mode 100644 (file)
index 0000000..73f24cf
--- /dev/null
@@ -0,0 +1,91 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+Date: Thu, 10 Mar 2016 16:44:32 +0100
+Subject: net: macb: fix default configuration for GMAC on AT91
+
+From: Nicolas Ferre <nicolas.ferre@atmel.com>
+
+[ Upstream commit 6bdaa5e9ed39b3b3328f35d218e8ad5a99cfc4d2 ]
+
+On AT91 SoCs, the User Register (USRIO) exposes a switch to configure the
+"Reduced" or "Traditional" version of the Media Independent Interface
+(RMII vs. MII or RGMII vs. GMII).
+As on the older EMAC version, on GMAC, this switch is set by default to the
+non-reduced type of interface, so use the existing capability and extend it to
+GMII as well. We then keep the current logic in the macb_init() function.
+
+The capabilities of sama5d2, sama5d4 and sama5d3 GEM interface are updated in
+the macb_config structure to be able to properly enable them with a traditional
+interface (GMII or MII).
+
+Reported-by: Romain HENRIET <romain.henriet@l-acoustics.com>
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[cyrille.pitchen@atmel.com: backported to 4.4.y]
+Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb.c |   13 +++++++------
+ drivers/net/ethernet/cadence/macb.h |    2 +-
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -2405,9 +2405,9 @@ static int macb_init(struct platform_dev
+       if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+               val = GEM_BIT(RGMII);
+       else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
+-               (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
++               (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
+               val = MACB_BIT(RMII);
+-      else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
++      else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
+               val = MACB_BIT(MII);
+       if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
+@@ -2738,7 +2738,7 @@ static int at91ether_init(struct platfor
+ }
+ static const struct macb_config at91sam9260_config = {
+-      .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
++      .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+ };
+@@ -2751,21 +2751,22 @@ static const struct macb_config pc302gem
+ };
+ static const struct macb_config sama5d2_config = {
+-      .caps = 0,
++      .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+       .dma_burst_length = 16,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+ };
+ static const struct macb_config sama5d3_config = {
+-      .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
++      .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
++            | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+       .dma_burst_length = 16,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+ };
+ static const struct macb_config sama5d4_config = {
+-      .caps = 0,
++      .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+       .dma_burst_length = 4,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+--- a/drivers/net/ethernet/cadence/macb.h
++++ b/drivers/net/ethernet/cadence/macb.h
+@@ -398,7 +398,7 @@
+ /* Capability mask bits */
+ #define MACB_CAPS_ISR_CLEAR_ON_WRITE          0x00000001
+ #define MACB_CAPS_USRIO_HAS_CLKEN             0x00000002
+-#define MACB_CAPS_USRIO_DEFAULT_IS_MII                0x00000004
++#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII   0x00000004
+ #define MACB_CAPS_NO_GIGABIT_HALF             0x00000008
+ #define MACB_CAPS_FIFO_MODE                   0x10000000
+ #define MACB_CAPS_GIGABIT_MODE_AVAILABLE      0x20000000
diff --git a/queue-4.4/net_sched-fix-pfifo_head_drop-behavior-vs-backlog.patch b/queue-4.4/net_sched-fix-pfifo_head_drop-behavior-vs-backlog.patch
new file mode 100644 (file)
index 0000000..d7c8b6f
--- /dev/null
@@ -0,0 +1,47 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 12 Jun 2016 20:01:25 -0700
+Subject: net_sched: fix pfifo_head_drop behavior vs backlog
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6c0d54f1897d229748d4f41ef919078db6db2123 ]
+
+When the qdisc is full, we drop a packet at the head of the queue,
+queue the current skb and return NET_XMIT_CN
+
+Now we track backlog on upper qdiscs, we need to call
+qdisc_tree_reduce_backlog(), even if the qlen did not change.
+
+Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: WANG Cong <xiyou.wangcong@gmail.com>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_fifo.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -37,14 +37,18 @@ static int pfifo_enqueue(struct sk_buff
+ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
++      unsigned int prev_backlog;
++
+       if (likely(skb_queue_len(&sch->q) < sch->limit))
+               return qdisc_enqueue_tail(skb, sch);
++      prev_backlog = sch->qstats.backlog;
+       /* queue full, remove one skb to fulfill the limit */
+       __qdisc_queue_drop_head(sch, &sch->q);
+       qdisc_qstats_drop(sch);
+       qdisc_enqueue_tail(skb, sch);
++      qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
+       return NET_XMIT_CN;
+ }
diff --git a/queue-4.4/netem-fix-a-use-after-free.patch b/queue-4.4/netem-fix-a-use-after-free.patch
new file mode 100644 (file)
index 0000000..16e7c0a
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 20 Jun 2016 15:00:43 -0700
+Subject: netem: fix a use after free
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 21de12ee5568fd1aec47890c72967abf791ac80a ]
+
+If the packet was dropped by lower qdisc, then we must not
+access it later.
+
+Save qdisc_pkt_len(skb) in a temp variable.
+
+Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: WANG Cong <xiyou.wangcong@gmail.com>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_netem.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -650,14 +650,14 @@ deliver:
+ #endif
+                       if (q->qdisc) {
++                              unsigned int pkt_len = qdisc_pkt_len(skb);
+                               int err = qdisc_enqueue(skb, q->qdisc);
+-                              if (unlikely(err != NET_XMIT_SUCCESS)) {
+-                                      if (net_xmit_drop_count(err)) {
+-                                              qdisc_qstats_drop(sch);
+-                                              qdisc_tree_reduce_backlog(sch, 1,
+-                                                                        qdisc_pkt_len(skb));
+-                                      }
++                              if (err != NET_XMIT_SUCCESS &&
++                                  net_xmit_drop_count(err)) {
++                                      qdisc_qstats_drop(sch);
++                                      qdisc_tree_reduce_backlog(sch, 1,
++                                                                pkt_len);
+                               }
+                               goto tfifo_dequeue;
+                       }
diff --git a/queue-4.4/sit-correct-ip-protocol-used-in-ipip6_err.patch b/queue-4.4/sit-correct-ip-protocol-used-in-ipip6_err.patch
new file mode 100644 (file)
index 0000000..d97d91e
--- /dev/null
@@ -0,0 +1,58 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Simon Horman <simon.horman@netronome.com>
+Date: Thu, 16 Jun 2016 17:06:19 +0900
+Subject: sit: correct IP protocol used in ipip6_err
+
+From: Simon Horman <simon.horman@netronome.com>
+
+[ Upstream commit d5d8760b78d0cfafe292f965f599988138b06a70 ]
+
+Since 32b8a8e59c9c ("sit: add IPv4 over IPv4 support")
+ipip6_err() may be called for packets whose IP protocol is
+IPPROTO_IPIP as well as those whose IP protocol is IPPROTO_IPV6.
+
+In the case of IPPROTO_IPIP packets the correct protocol value is not
+passed to ipv4_update_pmtu() or ipv4_redirect().
+
+This patch resolves this problem by using the IP protocol of the packet
+rather than a hard-coded value. This appears to be consistent
+with the usage of the protocol of a packet by icmp_socket_deliver()
+the caller of ipip6_err().
+
+I was able to exercise the redirect case by using a setup where an ICMP
+redirect was received for the destination of the encapsulated packet.
+However, it appears that although incorrect the protocol field is not used
+in this case and thus no problem manifests.  On inspection it does not
+appear that a problem will manifest in the fragmentation needed/update pmtu
+case either.
+
+In short I believe this is a cosmetic fix. None the less, the use of
+IPPROTO_IPV6 seems wrong and confusing.
+
+Reviewed-by: Dinan Gunawardena <dinan.gunawardena@netronome.com>
+Signed-off-by: Simon Horman <simon.horman@netronome.com>
+Acked-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/sit.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -560,13 +560,13 @@ static int ipip6_err(struct sk_buff *skb
+       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+-                               t->parms.link, 0, IPPROTO_IPV6, 0);
++                               t->parms.link, 0, iph->protocol, 0);
+               err = 0;
+               goto out;
+       }
+       if (type == ICMP_REDIRECT) {
+               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+-                            IPPROTO_IPV6, 0);
++                            iph->protocol, 0);
+               err = 0;
+               goto out;
+       }
diff --git a/queue-4.4/sock_diag-do-not-broadcast-raw-socket-destruction.patch b/queue-4.4/sock_diag-do-not-broadcast-raw-socket-destruction.patch
new file mode 100644 (file)
index 0000000..956a516
--- /dev/null
@@ -0,0 +1,45 @@
+From foo@baz Wed Jul  6 16:50:47 PDT 2016
+From: Willem de Bruijn <willemb@google.com>
+Date: Fri, 24 Jun 2016 16:02:35 -0400
+Subject: sock_diag: do not broadcast raw socket destruction
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 9a0fee2b552b1235fb1706ae1fc664ae74573be8 ]
+
+Diag intends to broadcast tcp_sk and udp_sk socket destruction.
+Testing sk->sk_protocol for IPPROTO_TCP/IPPROTO_UDP alone is not
+sufficient for this. Raw sockets can have the same type.
+
+Add a test for sk->sk_type.
+
+Fixes: eb4cb008529c ("sock_diag: define destruction multicast groups")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/sock_diag.h |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/linux/sock_diag.h
++++ b/include/linux/sock_diag.h
+@@ -35,6 +35,9 @@ enum sknetlink_groups sock_diag_destroy_
+ {
+       switch (sk->sk_family) {
+       case AF_INET:
++              if (sk->sk_type == SOCK_RAW)
++                      return SKNLGRP_NONE;
++
+               switch (sk->sk_protocol) {
+               case IPPROTO_TCP:
+                       return SKNLGRP_INET_TCP_DESTROY;
+@@ -44,6 +47,9 @@ enum sknetlink_groups sock_diag_destroy_
+                       return SKNLGRP_NONE;
+               }
+       case AF_INET6:
++              if (sk->sk_type == SOCK_RAW)
++                      return SKNLGRP_NONE;
++
+               switch (sk->sk_protocol) {
+               case IPPROTO_TCP:
+                       return SKNLGRP_INET6_TCP_DESTROY;