--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Edward Cree <ecree@solarflare.com>
+Date: Fri, 15 Sep 2017 14:37:38 +0100
+Subject: bpf/verifier: reject BPF_ALU64|BPF_END
+
+From: Edward Cree <ecree@solarflare.com>
+
+
+[ Upstream commit e67b8a685c7c984e834e3181ef4619cd7025a136 ]
+
+Neither ___bpf_prog_run nor the JITs accept it.
+Also adds a new test case.
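+
+For illustration only (this snippet is not part of the patch), the kind of
+instruction that is now rejected is a byte swap (BPF_END) encoded with the
+64-bit ALU class:
+
+#include <linux/bpf.h>
+
+/* Sketch only: BPF_END is implemented for the BPF_ALU class only, so the
+ * verifier now refuses this encoding with "BPF_END uses reserved fields".
+ */
+static const struct bpf_insn bad_end_insn = {
+	.code    = BPF_ALU64 | BPF_END | BPF_TO_LE,
+	.dst_reg = BPF_REG_0,
+	.src_reg = 0,
+	.off     = 0,
+	.imm     = 32,
+};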
+
+Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
+Signed-off-by: Edward Cree <ecree@solarflare.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -932,7 +932,8 @@ static int check_alu_op(struct reg_state
+ }
+ } else {
+ if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
+- (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
++ (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
++ BPF_CLASS(insn->code) == BPF_ALU64) {
+ verbose("BPF_END uses reserved fields\n");
+ return -EINVAL;
+ }
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Meng Xu <mengxu.gatech@gmail.com>
+Date: Tue, 19 Sep 2017 21:49:55 -0400
+Subject: isdn/i4l: fetch the ppp_write buffer in one shot
+
+From: Meng Xu <mengxu.gatech@gmail.com>
+
+
+[ Upstream commit 02388bf87f72e1d47174cd8f81c34443920eb5a0 ]
+
+In isdn_ppp_write(), the header (i.e., protobuf) of the buffer is
+fetched twice from userspace. The first fetch is used to peek at the
+protocol of the message and reset the huptimer if necessary, while the
+second fetch copies in the whole buffer. However, given that buf resides
+in userspace memory, a user process can race to change its memory content
+across fetches. By doing so, we can either avoid resetting the huptimer
+for any type of packet (by first setting proto to PPP_LCP and later
+changing it to the actual type) or force resetting the huptimer for LCP
+packets.
+
+This patch changes this double-fetch behavior into two single fetches,
+decided by the condition (lp->isdn_device < 0 || lp->isdn_channel < 0).
+A more detailed discussion can be found at
+https://marc.info/?l=linux-kernel&m=150586376926123&w=2
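+
+A minimal user-space sketch of the single-fetch idea (names and byte
+positions are illustrative, not taken from the driver): snapshot the user
+buffer once, then derive every decision from that snapshot instead of
+re-reading user memory:
+
+#include <string.h>
+
+/* 'ubuf' models the userspace buffer.  Both the protocol check and the
+ * payload handling are decided from 'copy', so a concurrent writer can no
+ * longer desynchronise the check from the use.
+ */
+int handle_write(const unsigned char *ubuf, size_t len, unsigned char *copy)
+{
+	memcpy(copy, ubuf, len);		/* single fetch          */
+	int proto = (copy[0] << 8) | copy[1];	/* decide from the copy  */
+	return proto;				/* never re-read 'ubuf'  */
+}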
+
+Signed-off-by: Meng Xu <mengxu.gatech@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/isdn/i4l/isdn_ppp.c | 37 +++++++++++++++++++++++++------------
+ 1 file changed, 25 insertions(+), 12 deletions(-)
+
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -828,7 +828,6 @@ isdn_ppp_write(int min, struct file *fil
+ isdn_net_local *lp;
+ struct ippp_struct *is;
+ int proto;
+- unsigned char protobuf[4];
+
+ is = file->private_data;
+
+@@ -842,24 +841,28 @@ isdn_ppp_write(int min, struct file *fil
+ if (!lp)
+ printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
+ else {
+- /*
+- * Don't reset huptimer for
+- * LCP packets. (Echo requests).
+- */
+- if (copy_from_user(protobuf, buf, 4))
+- return -EFAULT;
+- proto = PPP_PROTOCOL(protobuf);
+- if (proto != PPP_LCP)
+- lp->huptimer = 0;
++ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
++ unsigned char protobuf[4];
++ /*
++ * Don't reset huptimer for
++ * LCP packets. (Echo requests).
++ */
++ if (copy_from_user(protobuf, buf, 4))
++ return -EFAULT;
++
++ proto = PPP_PROTOCOL(protobuf);
++ if (proto != PPP_LCP)
++ lp->huptimer = 0;
+
+- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
+ return 0;
++ }
+
+ if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
+ lp->dialstate == 0 &&
+ (lp->flags & ISDN_NET_CONNECTED)) {
+ unsigned short hl;
+ struct sk_buff *skb;
++ unsigned char *cpy_buf;
+ /*
+ * we need to reserve enough space in front of
+ * sk_buff. old call to dev_alloc_skb only reserved
+@@ -872,11 +875,21 @@ isdn_ppp_write(int min, struct file *fil
+ return count;
+ }
+ skb_reserve(skb, hl);
+- if (copy_from_user(skb_put(skb, count), buf, count))
++ cpy_buf = skb_put(skb, count);
++ if (copy_from_user(cpy_buf, buf, count))
+ {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
++
++ /*
++ * Don't reset huptimer for
++ * LCP packets. (Echo requests).
++ */
++ proto = PPP_PROTOCOL(cpy_buf);
++ if (proto != PPP_LCP)
++ lp->huptimer = 0;
++
+ if (is->debug & 0x40) {
+ printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
+ isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Ridge Kennedy <ridge.kennedy@alliedtelesis.co.nz>
+Date: Wed, 22 Feb 2017 14:59:49 +1300
+Subject: l2tp: Avoid schedule while atomic in exit_net
+
+From: Ridge Kennedy <ridge.kennedy@alliedtelesis.co.nz>
+
+
+[ Upstream commit 12d656af4e3d2781b9b9f52538593e1717e7c979 ]
+
+While destroying a network namespace that contains a L2TP tunnel a
+"BUG: scheduling while atomic" can be observed.
+
+Enabling lockdep shows that this is happening because l2tp_exit_net()
+is calling l2tp_tunnel_closeall() (via l2tp_tunnel_delete()) from
+within an RCU critical section.
+
+l2tp_exit_net() takes rcu_read_lock_bh()
+ << list_for_each_entry_rcu() >>
+ l2tp_tunnel_delete()
+ l2tp_tunnel_closeall()
+ __l2tp_session_unhash()
+ synchronize_rcu() << Illegal inside RCU critical section >>
+
+BUG: sleeping function called from invalid context
+in_atomic(): 1, irqs_disabled(): 0, pid: 86, name: kworker/u16:2
+INFO: lockdep is turned off.
+CPU: 2 PID: 86 Comm: kworker/u16:2 Tainted: G W O 4.4.6-at1 #2
+Hardware name: Xen HVM domU, BIOS 4.6.1-xs125300 05/09/2016
+Workqueue: netns cleanup_net
+ 0000000000000000 ffff880202417b90 ffffffff812b0013 ffff880202410ac0
+ ffffffff81870de8 ffff880202417bb8 ffffffff8107aee8 ffffffff81870de8
+ 0000000000000c51 0000000000000000 ffff880202417be0 ffffffff8107b024
+Call Trace:
+ [<ffffffff812b0013>] dump_stack+0x85/0xc2
+ [<ffffffff8107aee8>] ___might_sleep+0x148/0x240
+ [<ffffffff8107b024>] __might_sleep+0x44/0x80
+ [<ffffffff810b21bd>] synchronize_sched+0x2d/0xe0
+ [<ffffffff8109be6d>] ? trace_hardirqs_on+0xd/0x10
+ [<ffffffff8105c7bb>] ? __local_bh_enable_ip+0x6b/0xc0
+ [<ffffffff816a1b00>] ? _raw_spin_unlock_bh+0x30/0x40
+ [<ffffffff81667482>] __l2tp_session_unhash+0x172/0x220
+ [<ffffffff81667397>] ? __l2tp_session_unhash+0x87/0x220
+ [<ffffffff8166888b>] l2tp_tunnel_closeall+0x9b/0x140
+ [<ffffffff81668c74>] l2tp_tunnel_delete+0x14/0x60
+ [<ffffffff81668dd0>] l2tp_exit_net+0x110/0x270
+ [<ffffffff81668d5c>] ? l2tp_exit_net+0x9c/0x270
+ [<ffffffff815001c3>] ops_exit_list.isra.6+0x33/0x60
+ [<ffffffff81501166>] cleanup_net+0x1b6/0x280
+ ...
+
+This bug can easily be reproduced with a few steps:
+
+ $ sudo unshare -n bash # Create a shell in a new namespace
+ # ip link set lo up
+ # ip addr add 127.0.0.1 dev lo
+ # ip l2tp add tunnel remote 127.0.0.1 local 127.0.0.1 tunnel_id 1 \
+ peer_tunnel_id 1 udp_sport 50000 udp_dport 50000
+ # ip l2tp add session name foo tunnel_id 1 session_id 1 \
+ peer_session_id 1
+ # ip link set foo up
+ # exit # Exit the shell, in turn exiting the namespace
+ $ dmesg
+ ...
+ [942121.089216] BUG: scheduling while atomic: kworker/u16:3/13872/0x00000200
+ ...
+
+To fix this, move the call to l2tp_tunnel_closeall() out of the RCU
+critical section, and instead call it from l2tp_tunnel_del_work(), which
+is running from the l2tp_wq workqueue.
+
+Fixes: 2b551c6e7d5b ("l2tp: close sessions before initiating tunnel delete")
+Signed-off-by: Ridge Kennedy <ridge.kennedy@alliedtelesis.co.nz>
+Acked-by: Guillaume Nault <g.nault@alphalink.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1317,6 +1317,9 @@ static void l2tp_tunnel_del_work(struct
+ struct sock *sk = NULL;
+
+ tunnel = container_of(work, struct l2tp_tunnel, del_work);
++
++ l2tp_tunnel_closeall(tunnel);
++
+ sk = l2tp_tunnel_sock_lookup(tunnel);
+ if (!sk)
+ goto out;
+@@ -1642,7 +1645,6 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
+ l2tp_tunnel_inc_refcount(tunnel);
+- l2tp_tunnel_closeall(tunnel);
+ if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+ l2tp_tunnel_dec_refcount(tunnel);
+ return 1;
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Tue, 26 Sep 2017 16:16:43 +0200
+Subject: l2tp: fix race condition in l2tp_tunnel_delete
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+
+[ Upstream commit 62b982eeb4589b2e6d7c01a90590e3a4c2b2ca19 ]
+
+If we try to delete the same tunnel twice, the first delete operation
+does a lookup (l2tp_tunnel_get), finds the tunnel, calls
+l2tp_tunnel_delete, which queues it for deletion by
+l2tp_tunnel_del_work.
+
+The second delete operation also finds the tunnel and calls
+l2tp_tunnel_delete. If the workqueue has already fired and started
+running l2tp_tunnel_del_work, then l2tp_tunnel_delete will queue the
+same tunnel a second time, and try to free the socket again.
+
+Add a dead flag to prevent firing the workqueue twice. Then we can
+remove the check of queue_work's result that was meant to prevent that
+race but doesn't.
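+
+The same idea in a minimal, runnable user-space analogue (illustrative
+names, not the kernel code): test-and-set on a "dead" flag makes delete
+idempotent, so the teardown work can only ever be queued once:
+
+#include <stdatomic.h>
+#include <stdio.h>
+
+/* 'dead' plays the role of the new tunnel->dead bit. */
+struct tunnel { atomic_flag dead; };
+
+static void tunnel_delete(struct tunnel *t)
+{
+	if (!atomic_flag_test_and_set(&t->dead))
+		puts("first delete: queue del_work");
+	else
+		puts("already being deleted: do nothing");
+}
+
+int main(void)
+{
+	struct tunnel t = { ATOMIC_FLAG_INIT };
+
+	tunnel_delete(&t);	/* queues the work      */
+	tunnel_delete(&t);	/* harmless second call */
+	return 0;
+}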
+
+Reproducer:
+
+ ip l2tp add tunnel tunnel_id 3000 peer_tunnel_id 4000 local 192.168.0.2 remote 192.168.0.1 encap udp udp_sport 5000 udp_dport 6000
+ ip l2tp add session name l2tp1 tunnel_id 3000 session_id 1000 peer_session_id 2000
+ ip link set l2tp1 up
+ ip l2tp del tunnel tunnel_id 3000
+ ip l2tp del tunnel tunnel_id 3000
+
+Fixes: f8ccac0e4493 ("l2tp: put tunnel socket release on a workqueue")
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Acked-by: Guillaume Nault <g.nault@alphalink.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_core.c | 10 ++++------
+ net/l2tp/l2tp_core.h | 5 ++++-
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1642,14 +1642,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+
+ /* This function is used by the netlink TUNNEL_DELETE command.
+ */
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
+- l2tp_tunnel_inc_refcount(tunnel);
+- if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+- l2tp_tunnel_dec_refcount(tunnel);
+- return 1;
++ if (!test_and_set_bit(0, &tunnel->dead)) {
++ l2tp_tunnel_inc_refcount(tunnel);
++ queue_work(l2tp_wq, &tunnel->del_work);
+ }
+- return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -169,6 +169,9 @@ struct l2tp_tunnel_cfg {
+
+ struct l2tp_tunnel {
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
++
++ unsigned long dead;
++
+ struct rcu_head rcu;
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[L2TP_HASH_SIZE];
+@@ -252,7 +255,7 @@ int l2tp_tunnel_create(struct net *net,
+ u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+ struct l2tp_tunnel **tunnelp);
+ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+ struct l2tp_session *l2tp_session_create(int priv_size,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Christoph Paasch <cpaasch@apple.com>
+Date: Tue, 26 Sep 2017 17:38:50 -0700
+Subject: net: Set sk_prot_creator when cloning sockets to the right proto
+
+From: Christoph Paasch <cpaasch@apple.com>
+
+
+[ Upstream commit 9d538fa60bad4f7b23193c89e843797a1cf71ef3 ]
+
+sk->sk_prot and sk->sk_prot_creator can differ when the app uses
+IPV6_ADDRFORM (transforming an IPv6 socket into an IPv4 one).
+sk_prot_creator exists to make sure that sk_prot_free() does the
+kmem_cache_free() on the right kmem_cache slab.
+
+Now, if such a socket gets transformed back to a listening socket (using
+connect() with AF_UNSPEC) we will allocate an IPv4 tcp_sock through
+sk_clone_lock() when a new connection comes in. But sk_prot_creator will
+still point to the IPv6 kmem_cache (as everything got copied in
+sk_clone_lock()). When freeing, we will thus put this
+memory back into the IPv6 kmem_cache although it was allocated in the
+IPv4 cache. I have seen memory corruption happening because of this.
+
+With slub-debugging and MEMCG_KMEM enabled this gives the warning
+ "cache_from_obj: Wrong slab cache. TCPv6 but object is from TCP"
+
+A C-program to trigger this:
+
+void main(void)
+{
+ int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
+ int new_fd, newest_fd, client_fd;
+ struct sockaddr_in6 bind_addr;
+ struct sockaddr_in bind_addr4, client_addr1, client_addr2;
+ struct sockaddr unsp;
+ int val;
+
+ memset(&bind_addr, 0, sizeof(bind_addr));
+ bind_addr.sin6_family = AF_INET6;
+ bind_addr.sin6_port = ntohs(42424);
+
+ memset(&client_addr1, 0, sizeof(client_addr1));
+ client_addr1.sin_family = AF_INET;
+ client_addr1.sin_port = ntohs(42424);
+ client_addr1.sin_addr.s_addr = inet_addr("127.0.0.1");
+
+ memset(&client_addr2, 0, sizeof(client_addr2));
+ client_addr2.sin_family = AF_INET;
+ client_addr2.sin_port = ntohs(42421);
+ client_addr2.sin_addr.s_addr = inet_addr("127.0.0.1");
+
+ memset(&unsp, 0, sizeof(unsp));
+ unsp.sa_family = AF_UNSPEC;
+
+ bind(fd, (struct sockaddr *)&bind_addr, sizeof(bind_addr));
+
+ listen(fd, 5);
+
+ client_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ connect(client_fd, (struct sockaddr *)&client_addr1, sizeof(client_addr1));
+ new_fd = accept(fd, NULL, NULL);
+ close(fd);
+
+ val = AF_INET;
+ setsockopt(new_fd, SOL_IPV6, IPV6_ADDRFORM, &val, sizeof(val));
+
+ connect(new_fd, &unsp, sizeof(unsp));
+
+ memset(&bind_addr4, 0, sizeof(bind_addr4));
+ bind_addr4.sin_family = AF_INET;
+ bind_addr4.sin_port = ntohs(42421);
+ bind(new_fd, (struct sockaddr *)&bind_addr4, sizeof(bind_addr4));
+
+ listen(new_fd, 5);
+
+ client_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ connect(client_fd, (struct sockaddr *)&client_addr2, sizeof(client_addr2));
+
+ newest_fd = accept(new_fd, NULL, NULL);
+ close(new_fd);
+
+ close(client_fd);
+ close(new_fd);
+}
+
+As far as I can see, this bug has been there since the beginning of the
+git-days.
+
+Signed-off-by: Christoph Paasch <cpaasch@apple.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1488,6 +1488,8 @@ struct sock *sk_clone_lock(const struct
+
+ sock_copy(newsk, sk);
+
++ newsk->sk_prot_creator = sk->sk_prot;
++
+ /* SANITY */
+ get_net(sock_net(newsk));
+ sk_node_init(&newsk->sk_node);
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Willem de Bruijn <willemb@google.com>
+Date: Thu, 14 Sep 2017 17:14:41 -0400
+Subject: packet: hold bind lock when rebinding to fanout hook
+
+From: Willem de Bruijn <willemb@google.com>
+
+
+[ Upstream commit 008ba2a13f2d04c947adc536d19debb8fe66f110 ]
+
+Packet socket bind operations must hold the po->bind_lock. This keeps
+po->running consistent with whether the socket is actually on a ptype
+list to receive packets.
+
+fanout_add unbinds a socket and its packet_rcv/tpacket_rcv call, then
+binds the fanout object to receive through packet_rcv_fanout.
+
+Make it hold the po->bind_lock when testing po->running and rebinding.
+Else, it can race with other rebind operations, such as that in
+packet_set_ring from packet_rcv to tpacket_rcv. Concurrent updates
+can result in a socket being added to a fanout group twice, causing
+use-after-free KASAN bug reports, among others.
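+
+The locking rule in a compact user-space sketch (not the af_packet code):
+the test of the running state and the rebind that depends on it have to
+happen under the same lock:
+
+#include <pthread.h>
+#include <stdbool.h>
+
+/* 'running' mirrors po->running, 'lock' mirrors po->bind_lock. */
+struct psock {
+	pthread_mutex_t lock;
+	bool running;
+	bool in_fanout;
+};
+
+int join_fanout(struct psock *po)
+{
+	int err = -1;
+
+	pthread_mutex_lock(&po->lock);
+	if (po->running && !po->in_fanout) {
+		/* "rebind" while still holding the lock, so a concurrent
+		 * rebind (e.g. the packet_rcv <-> tpacket_rcv switch in
+		 * packet_set_ring) cannot slip in between check and change.
+		 */
+		po->in_fanout = true;
+		err = 0;
+	}
+	pthread_mutex_unlock(&po->lock);
+
+	return err;
+}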
+
+Reported independently by both trinity and syzkaller.
+Verified that the syzkaller reproducer passes after this patch.
+
+Fixes: dc99f600698d ("packet: Add fanout support.")
+Reported-by: nixioaming <nixiaoming@huawei.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1429,9 +1429,6 @@ static int fanout_add(struct sock *sk, u
+ return -EINVAL;
+ }
+
+- if (!po->running)
+- return -EINVAL;
+-
+ if (po->fanout)
+ return -EALREADY;
+
+@@ -1469,7 +1466,10 @@ static int fanout_add(struct sock *sk, u
+ list_add(&match->list, &fanout_list);
+ }
+ err = -EINVAL;
+- if (match->type == type &&
++
++ spin_lock(&po->bind_lock);
++ if (po->running &&
++ match->type == type &&
+ match->prot_hook.type == po->prot_hook.type &&
+ match->prot_hook.dev == po->prot_hook.dev) {
+ err = -ENOSPC;
+@@ -1481,6 +1481,13 @@ static int fanout_add(struct sock *sk, u
+ err = 0;
+ }
+ }
++ spin_unlock(&po->bind_lock);
++
++ if (err && !atomic_read(&match->sk_ref)) {
++ list_del(&match->list);
++ kfree(match);
++ }
++
+ out:
+ mutex_unlock(&fanout_mutex);
+ return err;
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Willem de Bruijn <willemb@google.com>
+Date: Tue, 26 Sep 2017 12:19:37 -0400
+Subject: packet: in packet_do_bind, test fanout with bind_lock held
+
+From: Willem de Bruijn <willemb@google.com>
+
+
+[ Upstream commit 4971613c1639d8e5f102c4e797c3bf8f83a5a69e ]
+
+Once a socket has po->fanout set, it remains a member of the group
+until it is destroyed. The prot_hook must be constant and identical
+across sockets in the group.
+
+If fanout_add races with packet_do_bind between the test of po->fanout
+and taking the lock, the bind call may make type or dev inconsistent
+with that of the fanout group.
+
+Hold po->bind_lock when testing po->fanout to avoid this race.
+
+I had to introduce an artificial delay (local_bh_enable) to actually
+observe the race.
+
+Fixes: dc99f600698d ("packet: Add fanout support.")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2662,13 +2662,15 @@ static int packet_do_bind(struct sock *s
+ int ret = 0;
+ bool unlisted = false;
+
+- if (po->fanout)
+- return -EINVAL;
+-
+ lock_sock(sk);
+ spin_lock(&po->bind_lock);
+ rcu_read_lock();
+
++ if (po->fanout) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
+ if (name) {
+ dev = dev_get_by_name_rcu(sock_net(sk), name);
+ if (!dev) {
--- /dev/null
+From 42fbb07e2e3a338154ee747bf28dc688bc599b81 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 19 Oct 2017 14:55:29 +0200
+Subject: [PATCH] Revert "bsg-lib: don't free job in bsg_prepare_job"
+
+This reverts commit d9100405a20a71dd620843e0380e38fc50731108 which was
+commit f507b54dccfd8000c517d740bc45f20c74532d18 upstream.
+
+Ben reports:
+ That function doesn't exist here (it was introduced in 4.13).
+ Instead, this backport has modified bsg_create_job(), creating a
+ leak. Please revert this on the 3.18, 4.4 and 4.9 stable
+ branches.
+
+So I'm dropping it from here.
+
+Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bsg-lib.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -147,6 +147,7 @@ static int bsg_create_job(struct device
+ failjob_rls_rqst_payload:
+ kfree(job->request_payload.sg_list);
+ failjob_rls_job:
++ kfree(job);
+ return -ENOMEM;
+ }
+
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Thu, 14 Sep 2017 02:00:54 +0300
+Subject: sctp: potential read out of bounds in sctp_ulpevent_type_enabled()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+
+[ Upstream commit fa5f7b51fc3080c2b195fa87c7eca7c05e56f673 ]
+
+This code causes a static checker warning because Smatch doesn't trust
+anything that comes from skb->data. I've reviewed this code and I do
+think skb->data can be controlled by the user here.
+
+The sctp_event_subscribe struct has 13 __u8 fields and we want to see
+if ours is non-zero. sn_type can be any value in the 0-USHRT_MAX range.
+We're subtracting SCTP_SN_TYPE_BASE which is 1 << 15 so we could read
+either before the start of the struct or after the end.
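+
+A worked example of that arithmetic (struct size and base taken from the
+description above; the snippet is illustrative, not part of the patch):
+
+#include <stdio.h>
+
+#define SCTP_SN_TYPE_BASE (1 << 15)	/* 32768, as stated above          */
+#define SUBSCRIBE_SIZE 13		/* 13 __u8 fields, as stated above */
+
+int main(void)
+{
+	unsigned short samples[] = { 0, SCTP_SN_TYPE_BASE + 5, 65535 };
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		int offset = (int)samples[i] - SCTP_SN_TYPE_BASE;
+
+		printf("sn_type=%5hu -> offset=%6d -> %s\n", samples[i],
+		       offset, (offset >= 0 && offset < SUBSCRIBE_SIZE) ?
+		       "inside the struct" : "out-of-bounds read");
+	}
+	return 0;
+}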
+
+This is a very old bug and it's surprising that it would go undetected
+for so long, but my theory is that it just doesn't have a big impact,
+so it would be hard to notice.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sctp/ulpevent.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/include/net/sctp/ulpevent.h
++++ b/include/net/sctp/ulpevent.h
+@@ -141,8 +141,12 @@ __u16 sctp_ulpevent_get_notification_typ
+ static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
+ struct sctp_event_subscribe *mask)
+ {
++ int offset = sn_type - SCTP_SN_TYPE_BASE;
+ char *amask = (char *) mask;
+- return amask[sn_type - SCTP_SN_TYPE_BASE];
++
++ if (offset >= sizeof(struct sctp_event_subscribe))
++ return 0;
++ return amask[offset];
+ }
+
+ /* Given an event subscription, is this event enabled? */
x86-mm-disable-preemption-during-cr3-read-write.patch
drm-dp-mst-save-vcpi-with-payloads.patch
ext4-avoid-deadlock-when-expanding-inode-size.patch
+sctp-potential-read-out-of-bounds-in-sctp_ulpevent_type_enabled.patch
+bpf-verifier-reject-bpf_alu64-bpf_end.patch
+packet-hold-bind-lock-when-rebinding-to-fanout-hook.patch
+isdn-i4l-fetch-the-ppp_write-buffer-in-one-shot.patch
+vti-fix-use-after-free-in-vti_tunnel_xmit-vti6_tnl_xmit.patch
+l2tp-avoid-schedule-while-atomic-in-exit_net.patch
+l2tp-fix-race-condition-in-l2tp_tunnel_delete.patch
+packet-in-packet_do_bind-test-fanout-with-bind_lock-held.patch
+net-set-sk_prot_creator-when-cloning-sockets-to-the-right-proto.patch
+revert-bsg-lib-don-t-free-job-in-bsg_prepare_job.patch
--- /dev/null
+From foo@baz Thu Oct 19 14:44:13 CEST 2017
+From: Alexey Kodanev <alexey.kodanev@oracle.com>
+Date: Tue, 26 Sep 2017 15:14:29 +0300
+Subject: vti: fix use after free in vti_tunnel_xmit/vti6_tnl_xmit
+
+From: Alexey Kodanev <alexey.kodanev@oracle.com>
+
+
+[ Upstream commit 36f6ee22d2d66046e369757ec6bbe1c482957ba6 ]
+
+When running LTP IPsec tests, KASan might report:
+
+BUG: KASAN: use-after-free in vti_tunnel_xmit+0xeee/0xff0 [ip_vti]
+Read of size 4 at addr ffff880dc6ad1980 by task swapper/0/0
+...
+Call Trace:
+ <IRQ>
+ dump_stack+0x63/0x89
+ print_address_description+0x7c/0x290
+ kasan_report+0x28d/0x370
+ ? vti_tunnel_xmit+0xeee/0xff0 [ip_vti]
+ __asan_report_load4_noabort+0x19/0x20
+ vti_tunnel_xmit+0xeee/0xff0 [ip_vti]
+ ? vti_init_net+0x190/0x190 [ip_vti]
+ ? save_stack_trace+0x1b/0x20
+ ? save_stack+0x46/0xd0
+ dev_hard_start_xmit+0x147/0x510
+ ? icmp_echo.part.24+0x1f0/0x210
+ __dev_queue_xmit+0x1394/0x1c60
+...
+Freed by task 0:
+ save_stack_trace+0x1b/0x20
+ save_stack+0x46/0xd0
+ kasan_slab_free+0x70/0xc0
+ kmem_cache_free+0x81/0x1e0
+ kfree_skbmem+0xb1/0xe0
+ kfree_skb+0x75/0x170
+ kfree_skb_list+0x3e/0x60
+ __dev_queue_xmit+0x1298/0x1c60
+ dev_queue_xmit+0x10/0x20
+ neigh_resolve_output+0x3a8/0x740
+ ip_finish_output2+0x5c0/0xe70
+ ip_finish_output+0x4ba/0x680
+ ip_output+0x1c1/0x3a0
+ xfrm_output_resume+0xc65/0x13d0
+ xfrm_output+0x1e4/0x380
+ xfrm4_output_finish+0x5c/0x70
+
+Can be fixed if we get skb->len before dst_output().
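+
+The same pattern in a small user-space analogue (illustrative names;
+transmit() stands in for dst_output(), which consumes the buffer):
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct buf { size_t len; char data[64]; };
+
+/* Stand-in for dst_output(): takes ownership and may free the buffer. */
+static int transmit(struct buf *b)
+{
+	free(b);
+	return 0;
+}
+
+int main(void)
+{
+	struct buf *b = calloc(1, sizeof(*b));
+
+	if (!b)
+		return 1;
+	strcpy(b->data, "ping");
+	b->len = strlen(b->data);
+
+	size_t pkt_len = b->len;	/* snapshot before handing b away */
+	int err = transmit(b);		/* b must not be touched below    */
+
+	if (err == 0)
+		printf("accounted %zu bytes\n", pkt_len);
+	return 0;
+}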
+
+Fixes: b9959fd3b0fa ("vti: switch to new ip tunnel code")
+Fixes: 22e1b23dafa8 ("vti6: Support inter address family tunneling.")
+Signed-off-by: Alexey Kodanev <alexey.kodanev@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_vti.c | 3 ++-
+ net/ipv6/ip6_vti.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_bu
+ struct ip_tunnel_parm *parms = &tunnel->parms;
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *tdev; /* Device to other host */
++ int pkt_len = skb->len;
+ int err;
+
+ if (!dst) {
+@@ -199,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_bu
+
+ err = dst_output(skb);
+ if (net_xmit_eval(err) == 0)
+- err = skb->len;
++ err = pkt_len;
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ return NETDEV_TX_OK;
+
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -416,6 +416,7 @@ vti6_xmit(struct sk_buff *skb, struct ne
+ struct net_device_stats *stats = &t->dev->stats;
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *tdev;
++ int pkt_len = skb->len;
+ int err = -1;
+
+ if (!dst)
+@@ -450,7 +451,7 @@ vti6_xmit(struct sk_buff *skb, struct ne
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+- tstats->tx_bytes += skb->len;
++ tstats->tx_bytes += pkt_len;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+ } else {