4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 2 Feb 2019 11:19:48 +0000 (12:19 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 2 Feb 2019 11:19:48 +0000 (12:19 +0100)
added patches:
ipv6-consider-sk_bound_dev_if-when-binding-a-socket-to-an-address.patch
l2tp-copy-4-more-bytes-to-linear-part-if-necessary.patch
l2tp-fix-reading-optional-fields-of-l2tpv3.patch
l2tp-remove-l2specific_len-dependency-in-l2tp_core.patch
net-mlx4_core-add-masking-for-a-few-queries-on-hca-caps.patch
net-rose-fix-null-ax25_cb-kernel-panic.patch
netrom-switch-to-sock-timer-api.patch
ucc_geth-reset-bql-queue-when-stopping-device.patch

queue-4.4/ipv6-consider-sk_bound_dev_if-when-binding-a-socket-to-an-address.patch [new file with mode: 0644]
queue-4.4/l2tp-copy-4-more-bytes-to-linear-part-if-necessary.patch [new file with mode: 0644]
queue-4.4/l2tp-fix-reading-optional-fields-of-l2tpv3.patch [new file with mode: 0644]
queue-4.4/l2tp-remove-l2specific_len-dependency-in-l2tp_core.patch [new file with mode: 0644]
queue-4.4/net-mlx4_core-add-masking-for-a-few-queries-on-hca-caps.patch [new file with mode: 0644]
queue-4.4/net-rose-fix-null-ax25_cb-kernel-panic.patch [new file with mode: 0644]
queue-4.4/netrom-switch-to-sock-timer-api.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/ucc_geth-reset-bql-queue-when-stopping-device.patch [new file with mode: 0644]

diff --git a/queue-4.4/ipv6-consider-sk_bound_dev_if-when-binding-a-socket-to-an-address.patch b/queue-4.4/ipv6-consider-sk_bound_dev_if-when-binding-a-socket-to-an-address.patch
new file mode 100644 (file)
index 0000000..cb55136
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Sat Feb  2 11:39:00 CET 2019
+From: David Ahern <dsahern@gmail.com>
+Date: Wed, 2 Jan 2019 18:57:09 -0800
+Subject: ipv6: Consider sk_bound_dev_if when binding a socket to an address
+
+From: David Ahern <dsahern@gmail.com>
+
+[ Upstream commit c5ee066333ebc322a24a00a743ed941a0c68617e ]
+
+IPv6 does not consider whether the socket is bound to a device when
+binding to an address. The result is that a socket can be bound to eth0
+and then bound to the address of eth1. If the device is a VRF, the
+result is that a socket can only be bound to an address in the default
+VRF.
+
+Resolve this by considering the device if sk_bound_dev_if is set.
+
+This problem has existed since the beginning of git history.
+
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/af_inet6.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -345,6 +345,9 @@ int inet6_bind(struct socket *sock, stru
+                                       err = -EINVAL;
+                                       goto out_unlock;
+                               }
++                      }
++
++                      if (sk->sk_bound_dev_if) {
+                               dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+                               if (!dev) {
+                                       err = -ENODEV;
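For context, a minimal userspace sketch of the behaviour this fix enforces (the device names, port and address below are assumptions for the example, not taken from the patch): once a socket is bound to eth0 with SO_BINDTODEVICE, binding it to an address that is only configured on eth1 is expected to fail (e.g. with EADDRNOTAVAIL) instead of silently succeeding.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr_in6 sa = { .sin6_family = AF_INET6, .sin6_port = htons(5000) };
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);

        /* Bind the socket to eth0 first (requires CAP_NET_RAW). */
        setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));

        /* 2001:db8::2 is assumed to be configured on eth1, not on eth0. */
        inet_pton(AF_INET6, "2001:db8::2", &sa.sin6_addr);
        if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                perror("bind");  /* expected to fail once the fix is applied */
        return 0;
}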
diff --git a/queue-4.4/l2tp-copy-4-more-bytes-to-linear-part-if-necessary.patch b/queue-4.4/l2tp-copy-4-more-bytes-to-linear-part-if-necessary.patch
new file mode 100644 (file)
index 0000000..ad36e59
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Sat Feb  2 11:39:00 CET 2019
+From: Jacob Wen <jian.w.wen@oracle.com>
+Date: Thu, 31 Jan 2019 15:18:56 +0800
+Subject: l2tp: copy 4 more bytes to linear part if necessary
+
+From: Jacob Wen <jian.w.wen@oracle.com>
+
+[ Upstream commit 91c524708de6207f59dd3512518d8a1c7b434ee3 ]
+
+The size of the L2TPv2 header with all optional fields is 14 bytes.
+l2tp_udp_recv_core only moves 10 bytes to the linear part of an
+skb. This may lead to l2tp_recv_common reading data outside of the skb.
+
+This patch makes sure that there are at least 14 bytes in the linear
+part of an skb to meet the maximum need of l2tp_udp_recv_core and
+l2tp_recv_common. The minimum size of both a PPP HDLC-like frame and
+an Ethernet frame is larger than 14 bytes, so it is safe to do so.
+
+Also remove L2TP_HDR_SIZE_NOSEQ, it is unused now.
+
+Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts")
+Suggested-by: Guillaume Nault <gnault@redhat.com>
+Signed-off-by: Jacob Wen <jian.w.wen@oracle.com>
+Acked-by: Guillaume Nault <gnault@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_core.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -83,8 +83,7 @@
+ #define L2TP_SLFLAG_S    0x40000000
+ #define L2TP_SL_SEQ_MASK   0x00ffffff
+-#define L2TP_HDR_SIZE_SEQ             10
+-#define L2TP_HDR_SIZE_NOSEQ           6
++#define L2TP_HDR_SIZE_MAX             14
+ /* Default trace flags */
+ #define L2TP_DEFAULT_DEBUG_FLAGS      0
+@@ -860,7 +859,7 @@ static int l2tp_udp_recv_core(struct l2t
+       __skb_pull(skb, sizeof(struct udphdr));
+       /* Short packet? */
+-      if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
++      if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
+               l2tp_info(tunnel, L2TP_MSG_DATA,
+                         "%s: recv short packet (len=%d)\n",
+                         tunnel->name, skb->len);
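As a worked breakdown of the 14-byte figure (field sizes per the L2TPv2 wire format; the identifier names below are illustrative, not part of the patch):

/* L2TPv2 data header with every optional field present:
 *   flags/version   2 bytes
 *   length          2 bytes (optional)
 *   tunnel id       2 bytes
 *   session id      2 bytes
 *   Ns              2 bytes (optional)
 *   Nr              2 bytes (optional)
 *   offset size     2 bytes (optional)
 *   total          14 bytes  ->  L2TP_HDR_SIZE_MAX
 */
enum l2tpv2_field_len {
        L2TPV2_FLAGS_VER  = 2,
        L2TPV2_LENGTH     = 2,
        L2TPV2_TUNNEL_ID  = 2,
        L2TPV2_SESSION_ID = 2,
        L2TPV2_NS         = 2,
        L2TPV2_NR         = 2,
        L2TPV2_OFFSET_SZ  = 2,
        L2TPV2_HDR_MAX    = L2TPV2_FLAGS_VER + L2TPV2_LENGTH + L2TPV2_TUNNEL_ID +
                            L2TPV2_SESSION_ID + L2TPV2_NS + L2TPV2_NR + L2TPV2_OFFSET_SZ,
};
/* L2TPV2_HDR_MAX == 14, matching the new L2TP_HDR_SIZE_MAX above. */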
diff --git a/queue-4.4/l2tp-fix-reading-optional-fields-of-l2tpv3.patch b/queue-4.4/l2tp-fix-reading-optional-fields-of-l2tpv3.patch
new file mode 100644 (file)
index 0000000..0b4650d
--- /dev/null
@@ -0,0 +1,112 @@
+From foo@baz Sat Feb  2 10:57:42 CET 2019
+From: Jacob Wen <jian.w.wen@oracle.com>
+Date: Wed, 30 Jan 2019 14:55:14 +0800
+Subject: l2tp: fix reading optional fields of L2TPv3
+
+From: Jacob Wen <jian.w.wen@oracle.com>
+
+[ Upstream commit 4522a70db7aa5e77526a4079628578599821b193 ]
+
+Use pskb_may_pull() to make sure the optional fields are in skb linear
+parts, so we can safely read them later.
+
+It's easy to reproduce the issue with a net driver that supports paged
+skb data. Just create an L2TPv3 over IP tunnel and then generate some
+network traffic.
+Once reproduced, the rx err counter in /sys/kernel/debug/l2tp/tunnels
+will increase.
+
+Changes in v4:
+1. s/l2tp_v3_pull_opt/l2tp_v3_ensure_opt_in_linear/
+2. s/tunnel->version != L2TP_HDR_VER_2/tunnel->version == L2TP_HDR_VER_3/
+3. Add 'Fixes' in commit messages.
+
+Changes in v3:
+1. To keep consistency, move the code out of l2tp_recv_common.
+2. Use "net" instead of "net-next", since this is a bug fix.
+
+Changes in v2:
+1. Only fix L2TPv3 to make code simple.
+   To fix both L2TPv3 and L2TPv2, we'd better refactor l2tp_recv_common.
+   It's complicated to do so.
+2. Reloading pointers after pskb_may_pull
+
+Fixes: f7faffa3ff8e ("l2tp: Add L2TPv3 protocol support")
+Fixes: 0d76751fad77 ("l2tp: Add L2TPv3 IP encapsulation (no UDP) support")
+Fixes: a32e0eec7042 ("l2tp: introduce L2TPv3 IP encapsulation support for IPv6")
+Signed-off-by: Jacob Wen <jian.w.wen@oracle.com>
+Acked-by: Guillaume Nault <gnault@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_core.c |    4 ++++
+ net/l2tp/l2tp_core.h |   20 ++++++++++++++++++++
+ net/l2tp/l2tp_ip.c   |    3 +++
+ net/l2tp/l2tp_ip6.c  |    3 +++
+ 4 files changed, 30 insertions(+)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -930,6 +930,10 @@ static int l2tp_udp_recv_core(struct l2t
+               goto error;
+       }
++      if (tunnel->version == L2TP_HDR_VER_3 &&
++          l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++              goto error;
++
+       l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+       return 0;
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -324,6 +324,26 @@ static inline int l2tp_get_l2specific_le
+       }
+ }
++static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
++                                             unsigned char **ptr, unsigned char **optr)
++{
++      int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
++
++      if (opt_len > 0) {
++              int off = *ptr - *optr;
++
++              if (!pskb_may_pull(skb, off + opt_len))
++                      return -1;
++
++              if (skb->data != *optr) {
++                      *optr = skb->data;
++                      *ptr = skb->data + off;
++              }
++      }
++
++      return 0;
++}
++
+ #define l2tp_printk(ptr, type, func, fmt, ...)                                \
+ do {                                                                  \
+       if (((ptr)->debug) & (type))                                    \
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -163,6 +163,9 @@ static int l2tp_ip_recv(struct sk_buff *
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+       }
++      if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++              goto discard;
++
+       l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+       return 0;
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -174,6 +174,9 @@ static int l2tp_ip6_recv(struct sk_buff
+               print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+       }
++      if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++              goto discard;
++
+       l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
+                        tunnel->recv_payload_hook);
+       return 0;
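Regarding item 2 of the v2 changelog ("Reloading pointers after pskb_may_pull"), a sketch of the underlying rule, using a hypothetical helper name: pskb_may_pull() may reallocate the skb head, so pointers previously derived from skb->data must be rebased from a saved offset after a successful pull, which is exactly what l2tp_v3_ensure_opt_in_linear() does.

#include <linux/skbuff.h>

/* Hypothetical helper illustrating the rebase pattern (not in the patch). */
static int pull_opts_and_rebase(struct sk_buff *skb, unsigned char **ptr,
                                unsigned char **optr, int opt_len)
{
        int off = *ptr - *optr;         /* offset survives a head reallocation */

        if (!pskb_may_pull(skb, off + opt_len))
                return -1;              /* packet too short: caller discards it */

        if (skb->data != *optr) {       /* head moved: rebase both pointers */
                *optr = skb->data;
                *ptr = skb->data + off;
        }
        return 0;
}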
diff --git a/queue-4.4/l2tp-remove-l2specific_len-dependency-in-l2tp_core.patch b/queue-4.4/l2tp-remove-l2specific_len-dependency-in-l2tp_core.patch
new file mode 100644 (file)
index 0000000..7d1e143
--- /dev/null
@@ -0,0 +1,119 @@
+From 62e7b6a57c7b9bf3c6fd99418eeec05b08a85c38 Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Date: Tue, 16 Jan 2018 23:01:55 +0100
+Subject: l2tp: remove l2specific_len dependency in l2tp_core
+
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+
+commit 62e7b6a57c7b9bf3c6fd99418eeec05b08a85c38 upstream.
+
+Remove the l2specific_len dependency while building the l2tpv3 header or
+parsing the received frame, since the default L2-Specific Sublayer is
+always four bytes long and we don't need to rely on a user-supplied
+value.
+Moreover, the l2tp netlink code has no sanity checks to enforce the
+relation between l2specific_len and l2specific_type, so a malformed
+netlink message can set l2specific_type to L2TP_L2SPECTYPE_DEFAULT (or
+even L2TP_L2SPECTYPE_NONE) while setting l2specific_len to a value
+greater than 4, leaking memory on the wire and sending corrupted frames.
+
+Reviewed-by: Guillaume Nault <g.nault@alphalink.fr>
+Tested-by: Guillaume Nault <g.nault@alphalink.fr>
+Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/l2tp/l2tp_core.c |   34 ++++++++++++++++------------------
+ net/l2tp/l2tp_core.h |   11 +++++++++++
+ 2 files changed, 27 insertions(+), 18 deletions(-)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -704,11 +704,9 @@ void l2tp_recv_common(struct l2tp_sessio
+                                "%s: recv data ns=%u, session nr=%u\n",
+                                session->name, ns, session->nr);
+               }
++              ptr += 4;
+       }
+-      /* Advance past L2-specific header, if present */
+-      ptr += session->l2specific_len;
+-
+       if (L2TP_SKB_CB(skb)->has_seq) {
+               /* Received a packet with sequence numbers. If we're the LNS,
+                * check if we sre sending sequence numbers and if not,
+@@ -1030,21 +1028,20 @@ static int l2tp_build_l2tpv3_header(stru
+               memcpy(bufp, &session->cookie[0], session->cookie_len);
+               bufp += session->cookie_len;
+       }
+-      if (session->l2specific_len) {
+-              if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+-                      u32 l2h = 0;
+-                      if (session->send_seq) {
+-                              l2h = 0x40000000 | session->ns;
+-                              session->ns++;
+-                              session->ns &= 0xffffff;
+-                              l2tp_dbg(session, L2TP_MSG_SEQ,
+-                                       "%s: updated ns to %u\n",
+-                                       session->name, session->ns);
+-                      }
++      if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
++              u32 l2h = 0;
+-                      *((__be32 *) bufp) = htonl(l2h);
++              if (session->send_seq) {
++                      l2h = 0x40000000 | session->ns;
++                      session->ns++;
++                      session->ns &= 0xffffff;
++                      l2tp_dbg(session, L2TP_MSG_SEQ,
++                               "%s: updated ns to %u\n",
++                               session->name, session->ns);
+               }
+-              bufp += session->l2specific_len;
++
++              *((__be32 *)bufp) = htonl(l2h);
++              bufp += 4;
+       }
+       if (session->offset)
+               bufp += session->offset;
+@@ -1723,7 +1720,7 @@ int l2tp_session_delete(struct l2tp_sess
+ EXPORT_SYMBOL_GPL(l2tp_session_delete);
+ /* We come here whenever a session's send_seq, cookie_len or
+- * l2specific_len parameters are set.
++ * l2specific_type parameters are set.
+  */
+ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+ {
+@@ -1732,7 +1729,8 @@ void l2tp_session_set_header_len(struct
+               if (session->send_seq)
+                       session->hdr_len += 4;
+       } else {
+-              session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
++              session->hdr_len = 4 + session->cookie_len + session->offset;
++              session->hdr_len += l2tp_get_l2specific_len(session);
+               if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+                       session->hdr_len += 4;
+       }
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -313,6 +313,17 @@ do {                                                                      \
+ #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
+ #endif
++static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
++{
++      switch (session->l2specific_type) {
++      case L2TP_L2SPECTYPE_DEFAULT:
++              return 4;
++      case L2TP_L2SPECTYPE_NONE:
++      default:
++              return 0;
++      }
++}
++
+ #define l2tp_printk(ptr, type, func, fmt, ...)                                \
+ do {                                                                  \
+       if (((ptr)->debug) & (type))                                    \
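A sketch of the default L2-Specific Sublayer that this patch always emits as four bytes (the helper below is illustrative; the constants mirror L2TP_SLFLAG_S and L2TP_SL_SEQ_MASK used elsewhere in l2tp_core.c):

#include <stdint.h>

/* Default L2-Specific Sublayer (RFC 3931): one 32-bit word with the
 * S flag in bit 30 and a 24-bit Ns counter in the low bits. */
static uint32_t build_default_l2specific(int send_seq, uint32_t ns)
{
        uint32_t l2h = 0;

        if (send_seq)
                l2h = 0x40000000u | (ns & 0x00ffffffu); /* S bit + 24-bit Ns */

        return l2h;     /* written to the wire as a single big-endian word */
}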
diff --git a/queue-4.4/net-mlx4_core-add-masking-for-a-few-queries-on-hca-caps.patch b/queue-4.4/net-mlx4_core-add-masking-for-a-few-queries-on-hca-caps.patch
new file mode 100644 (file)
index 0000000..5645af2
--- /dev/null
@@ -0,0 +1,142 @@
+From foo@baz Sat Feb  2 11:39:00 CET 2019
+From: Aya Levin <ayal@mellanox.com>
+Date: Tue, 22 Jan 2019 15:19:44 +0200
+Subject: net/mlx4_core: Add masking for a few queries on HCA caps
+
+From: Aya Levin <ayal@mellanox.com>
+
+[ Upstream commit a40ded6043658444ee4dd6ee374119e4e98b33fc ]
+
+The driver reads the query HCA capabilities without the corresponding
+masks. Without the correct masks, the base addresses of the queues are
+unaligned.  In addition, some reserved bits were wrongly read.  Using the
+correct masks ensures alignment of the base addresses and allows future
+firmware versions to make safe use of the reserved bits.
+
+Fixes: ab9c17a009ee ("mlx4_core: Modify driver initialization flow to accommodate SRIOV for Ethernet")
+Fixes: 0ff1fb654bec ("{NET, IB}/mlx4: Add device managed flow steering firmware API")
+Signed-off-by: Aya Levin <ayal@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/fw.c |   75 +++++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 29 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -1906,9 +1906,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ {
+       struct mlx4_cmd_mailbox *mailbox;
+       __be32 *outbox;
++      u64 qword_field;
+       u32 dword_field;
+-      int err;
++      u16 word_field;
+       u8 byte_field;
++      int err;
+       static const u8 a0_dmfs_query_hw_steering[] =  {
+               [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
+               [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
+@@ -1936,19 +1938,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+       /* QPC/EEC/CQC/EQC/RDMARC attributes */
+-      MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
+-      MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
+-      MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
+-      MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
+-      MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
+-      MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
+-      MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
+-      MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+-      MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+-      MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
++      MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
++      param->qpc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
++      param->log_num_qps = byte_field & 0x1f;
++      MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
++      param->srqc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
++      param->log_num_srqs = byte_field & 0x1f;
++      MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
++      param->cqc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
++      param->log_num_cqs = byte_field & 0x1f;
++      MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
++      param->altc_base = qword_field;
++      MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
++      param->auxc_base = qword_field;
++      MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
++      param->eqc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
++      param->log_num_eqs = byte_field & 0x1f;
++      MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
++      param->num_sys_eqs = word_field & 0xfff;
++      MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
++      param->rdmarc_base = qword_field & ~((u64)0x1f);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
++      param->log_rd_per_qp = byte_field & 0x7;
+       MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+       if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+@@ -1967,22 +1982,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+       /* steering attributes */
+       if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+               MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+-              MLX4_GET(param->log_mc_entry_sz, outbox,
+-                       INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+-              MLX4_GET(param->log_mc_table_sz, outbox,
+-                       INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+-              MLX4_GET(byte_field, outbox,
+-                       INIT_HCA_FS_A0_OFFSET);
++              MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
++              param->log_mc_entry_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
++              param->log_mc_table_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
+               param->dmfs_high_steer_mode =
+                       a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
+       } else {
+               MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+-              MLX4_GET(param->log_mc_entry_sz, outbox,
+-                       INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+-              MLX4_GET(param->log_mc_hash_sz,  outbox,
+-                       INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+-              MLX4_GET(param->log_mc_table_sz, outbox,
+-                       INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++              MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
++              param->log_mc_entry_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field,  outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
++              param->log_mc_hash_sz = byte_field & 0x1f;
++              MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
++              param->log_mc_table_sz = byte_field & 0x1f;
+       }
+       /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+@@ -2006,15 +2020,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+       /* TPT attributes */
+       MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+-      MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
+-      MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++      MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
++      param->mw_enabled = byte_field >> 7;
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
++      param->log_mpt_sz = byte_field & 0x3f;
+       MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
+       MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
+       /* UAR attributes */
+       MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+-      MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++      MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
++      param->log_uar_sz = byte_field & 0xf;
+       /* phv_check enable */
+       MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
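A reduced sketch of the masking idea (field widths follow the hunks above; the helper names are illustrative): each queried field keeps only its documented bits, so reserved bits that newer firmware may set no longer leak into the parsed parameters, and the queue base addresses stay 32-byte aligned.

#include <stdint.h>

/* Base addresses: the low 5 bits are reserved, so mask them off. */
static uint64_t hca_queue_base(uint64_t qword_field)
{
        return qword_field & ~((uint64_t)0x1f);
}

/* log_num_* fields occupy only 5 bits of the queried byte. */
static uint8_t hca_log_num(uint8_t byte_field)
{
        return byte_field & 0x1f;
}

/* num_sys_eqs is a 12-bit field inside a 16-bit word. */
static uint16_t hca_num_sys_eqs(uint16_t word_field)
{
        return word_field & 0xfff;
}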
diff --git a/queue-4.4/net-rose-fix-null-ax25_cb-kernel-panic.patch b/queue-4.4/net-rose-fix-null-ax25_cb-kernel-panic.patch
new file mode 100644 (file)
index 0000000..a084489
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Sat Feb  2 11:39:00 CET 2019
+From: Bernard Pidoux <f6bvp@free.fr>
+Date: Fri, 25 Jan 2019 11:46:40 +0100
+Subject: net/rose: fix NULL ax25_cb kernel panic
+
+From: Bernard Pidoux <f6bvp@free.fr>
+
+[ Upstream commit b0cf029234f9b18e10703ba5147f0389c382bccc ]
+
+When an internally generated frame is handled by rose_xmit(),
+rose_route_frame() is called:
+
+        if (!rose_route_frame(skb, NULL)) {
+                dev_kfree_skb(skb);
+                stats->tx_errors++;
+                return NETDEV_TX_OK;
+        }
+
+We have the same code sequence in Net/Rom, where an internally generated
+frame is handled by nr_xmit() calling nr_route_frame(skb, NULL).
+However, in that function the NULL argument is tested, while it is not in
+rose_route_frame().
+A kernel panic then occurs later on when ax25cmp() is called with a NULL
+ax25_cb argument, as reported many times and recently by syzbot.
+
+We need to test if ax25 is NULL before using it.
+
+Testing:
+Built kernel with CONFIG_ROSE=y.
+
+Signed-off-by: Bernard Pidoux <f6bvp@free.fr>
+Acked-by: Dmitry Vyukov <dvyukov@google.com>
+Reported-by: syzbot+1a2c456a1ea08fa5b5f7@syzkaller.appspotmail.com
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Bernard Pidoux <f6bvp@free.fr>
+Cc: linux-hams@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rose/rose_route.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/rose/rose_route.c
++++ b/net/rose/rose_route.c
+@@ -848,6 +848,7 @@ void rose_link_device_down(struct net_de
+ /*
+  *    Route a frame to an appropriate AX.25 connection.
++ *    A NULL ax25_cb indicates an internally generated frame.
+  */
+ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ {
+@@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb
+       if (skb->len < ROSE_MIN_LEN)
+               return res;
++
++      if (!ax25)
++              return rose_loopback_queue(skb, NULL);
++
+       frametype = skb->data[2];
+       lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+       if (frametype == ROSE_CALL_REQUEST &&
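A simplified sketch of the resulting control flow (hypothetical function name; the real change is the two added lines above): a locally generated frame reaches rose_route_frame() with ax25 == NULL and is diverted to the loopback queue before any ax25_cb field can be dereferenced.

#include <linux/skbuff.h>
#include <net/ax25.h>
#include <net/rose.h>

/* Sketch only; mirrors the fixed entry of rose_route_frame(). */
static int rose_route_frame_sketch(struct sk_buff *skb, ax25_cb *ax25)
{
        if (skb->len < ROSE_MIN_LEN)
                return 0;

        if (!ax25)                              /* internally generated frame */
                return rose_loopback_queue(skb, NULL);

        /* ... neighbour lookup and ax25cmp() on a valid ax25_cb ... */
        return 1;
}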
diff --git a/queue-4.4/netrom-switch-to-sock-timer-api.patch b/queue-4.4/netrom-switch-to-sock-timer-api.patch
new file mode 100644 (file)
index 0000000..007e1b4
--- /dev/null
@@ -0,0 +1,95 @@
+From foo@baz Sat Feb  2 11:39:00 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 24 Jan 2019 14:18:18 -0800
+Subject: netrom: switch to sock timer API
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 63346650c1a94a92be61a57416ac88c0a47c4327 ]
+
+sk_reset_timer() and sk_stop_timer() properly handle the
+sock refcnt for the timer functions. Switching to them
+fixes a refcounting bug reported by syzbot.
+
+Reported-and-tested-by: syzbot+defa700d16f1bd1b9a05@syzkaller.appspotmail.com
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: linux-hams@vger.kernel.org
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netrom/nr_timer.c |   20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk)
+ {
+       struct nr_sock *nr = nr_sk(sk);
+-      mod_timer(&nr->t1timer, jiffies + nr->t1);
++      sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
+ }
+ void nr_start_t2timer(struct sock *sk)
+ {
+       struct nr_sock *nr = nr_sk(sk);
+-      mod_timer(&nr->t2timer, jiffies + nr->t2);
++      sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
+ }
+ void nr_start_t4timer(struct sock *sk)
+ {
+       struct nr_sock *nr = nr_sk(sk);
+-      mod_timer(&nr->t4timer, jiffies + nr->t4);
++      sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
+ }
+ void nr_start_idletimer(struct sock *sk)
+@@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk)
+       struct nr_sock *nr = nr_sk(sk);
+       if (nr->idle > 0)
+-              mod_timer(&nr->idletimer, jiffies + nr->idle);
++              sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
+ }
+ void nr_start_heartbeat(struct sock *sk)
+ {
+-      mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
++      sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
+ }
+ void nr_stop_t1timer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->t1timer);
++      sk_stop_timer(sk, &nr_sk(sk)->t1timer);
+ }
+ void nr_stop_t2timer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->t2timer);
++      sk_stop_timer(sk, &nr_sk(sk)->t2timer);
+ }
+ void nr_stop_t4timer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->t4timer);
++      sk_stop_timer(sk, &nr_sk(sk)->t4timer);
+ }
+ void nr_stop_idletimer(struct sock *sk)
+ {
+-      del_timer(&nr_sk(sk)->idletimer);
++      sk_stop_timer(sk, &nr_sk(sk)->idletimer);
+ }
+ void nr_stop_heartbeat(struct sock *sk)
+ {
+-      del_timer(&sk->sk_timer);
++      sk_stop_timer(sk, &sk->sk_timer);
+ }
+ int nr_t1timer_running(struct sock *sk)
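The point of the sock timer API, sketched with hypothetical wrapper names: sk_reset_timer() holds a reference on the sock while the timer is pending and sk_stop_timer() drops it, so the timer callback can never run against a sock that has already been freed — a guarantee that plain mod_timer()/del_timer() do not give.

#include <net/sock.h>

/* Illustrative wrappers (not part of the patch). */
static void nr_arm_timer_sketch(struct sock *sk, struct timer_list *t,
                                unsigned long delay)
{
        /* takes a sock reference while the timer is pending */
        sk_reset_timer(sk, t, jiffies + delay);
}

static void nr_disarm_timer_sketch(struct sock *sk, struct timer_list *t)
{
        /* drops the reference if the timer was still pending */
        sk_stop_timer(sk, t);
}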
diff --git a/queue-4.4/series b/queue-4.4/series
index 73d4be3f1b1b5b02ba5502608c0e26446ec2c84c..9e11ca740df748443dc5171eab771813ae5dfeb7 100644 (file)
@@ -31,3 +31,11 @@ f2fs-read-page-index-before-freeing.patch
 0003-Revert-loop-Fold-__loop_release-into-loop_release.patch
 s390-smp-fix-calling-smp_call_ipl_cpu-from-ipl-cpu.patch
 fs-add-the-fsnotify-call-to-vfs_iter_write.patch
+ipv6-consider-sk_bound_dev_if-when-binding-a-socket-to-an-address.patch
+l2tp-copy-4-more-bytes-to-linear-part-if-necessary.patch
+net-mlx4_core-add-masking-for-a-few-queries-on-hca-caps.patch
+netrom-switch-to-sock-timer-api.patch
+net-rose-fix-null-ax25_cb-kernel-panic.patch
+ucc_geth-reset-bql-queue-when-stopping-device.patch
+l2tp-remove-l2specific_len-dependency-in-l2tp_core.patch
+l2tp-fix-reading-optional-fields-of-l2tpv3.patch
diff --git a/queue-4.4/ucc_geth-reset-bql-queue-when-stopping-device.patch b/queue-4.4/ucc_geth-reset-bql-queue-when-stopping-device.patch
new file mode 100644 (file)
index 0000000..1927262
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Sat Feb  2 11:39:00 CET 2019
+From: Mathias Thore <mathias.thore@infinera.com>
+Date: Mon, 28 Jan 2019 10:07:47 +0100
+Subject: ucc_geth: Reset BQL queue when stopping device
+
+From: Mathias Thore <mathias.thore@infinera.com>
+
+[ Upstream commit e15aa3b2b1388c399c1a2ce08550d2cc4f7e3e14 ]
+
+After a timeout event caused by, for example, a broadcast storm, when
+the MAC and PHY are reset, the BQL TX queue needs to be reset as
+well. Otherwise, the device will exhibit severe performance issues
+even after the storm has ended.
+
+Co-authored-by: David Gounaris <david.gounaris@infinera.com>
+Signed-off-by: Mathias Thore <mathias.thore@infinera.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/ucc_geth.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_
+       u16 i, j;
+       u8 __iomem *bd;
++      netdev_reset_queue(ugeth->ndev);
++
+       ug_info = ugeth->ug_info;
+       uf_info = &ug_info->uf_info;
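For background, a driver-agnostic sketch of the BQL bookkeeping involved (hypothetical function names): bytes are accounted when queued and when completed, and netdev_reset_queue() clears that state. If the MAC is reset without clearing it, the in-flight byte count never drains and BQL keeps throttling the queue, which matches the severe performance issues described above.

#include <linux/netdevice.h>

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
        netdev_sent_queue(dev, skb->len);       /* bytes handed to the hardware */
        /* ... place the skb on the TX ring ... */
        return NETDEV_TX_OK;
}

static void sketch_tx_complete(struct net_device *dev, unsigned int pkts,
                               unsigned int bytes)
{
        netdev_completed_queue(dev, pkts, bytes);  /* bytes the hardware finished */
}

static void sketch_stop(struct net_device *dev)
{
        netdev_reset_queue(dev);  /* forget in-flight accounting across a MAC reset */
}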