--- /dev/null
+From foo@baz Thu Feb 23 21:13:19 CET 2017
+From: Andrey Konovalov <andreyknvl@google.com>
+Date: Thu, 16 Feb 2017 17:22:46 +0100
+Subject: dccp: fix freeing skb too early for IPV6_RECVPKTINFO
+
+From: Andrey Konovalov <andreyknvl@google.com>
+
+
+[ Upstream commit 5edabca9d4cff7f1f2b68f0bac55ef99d9798ba4 ]
+
+In the current DCCP implementation an skb for a DCCP_PKT_REQUEST packet
+is forcibly freed via __kfree_skb in dccp_rcv_state_process if
+dccp_v6_conn_request successfully returns.
+
+However, if IPV6_RECVPKTINFO is set on a socket, the address of the skb
+is saved to ireq->pktopts and the ref count for skb is incremented in
+dccp_v6_conn_request, so skb is still in use. Nevertheless, it gets freed
+in dccp_rcv_state_process.
+
+Fix by calling consume_skb instead of doing goto discard and therefore
+calling __kfree_skb.
+
+Similar fixes for TCP:
+
+fb7e2399ec17f1004c0e0ccfd17439f8759ede01 [TCP]: skb is unexpectedly freed.
+0aea76d35c9651d55bbaf746e7914e5f9ae5a25d tcp: SYN packets are now
+simply consumed
+
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/input.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/dccp/input.c
++++ b/net/dccp/input.c
+@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *
+ if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
+ skb) < 0)
+ return 1;
+- goto discard;
++ consume_skb(skb);
++ return 0;
+ }
+ if (dh->dccph_type == DCCP_PKT_RESET)
+ goto discard;
--- /dev/null
+From foo@baz Thu Feb 23 21:13:19 CET 2017
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 21 Feb 2017 09:33:18 +0100
+Subject: ip: fix IP_CHECKSUM handling
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+
+[ Upstream commit ca4ef4574f1ee5252e2cd365f8f5d5bafd048f32 ]
+
+The skbs processed by ip_cmsg_recv() are not guaranteed to
+be linear e.g. when sending UDP packets over loopback with
+MSG_MORE.
+Using csum_partial() on [potentially] the whole skb len
+is dangerous; instead be on the safe side and use skb_checksum().
+
+Thanks to syzkaller team to detect the issue and provide the
+reproducer.
+
+v1 -> v2:
+ - move the variable declaration in a tighter scope
+
+Fixes: ad6f939ab193 ("ip: Add offset parameter to ip_cmsg_recv")
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_sockglue.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -105,10 +105,10 @@ static void ip_cmsg_recv_checksum(struct
+ if (skb->ip_summed != CHECKSUM_COMPLETE)
+ return;
+
+- if (offset != 0)
+- csum = csum_sub(csum,
+- csum_partial(skb->data + tlen,
+- offset, 0));
++ if (offset != 0) {
++ int tend_off = skb_transport_offset(skb) + tlen;
++ csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
++ }
+
+ put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
+ }
--- /dev/null
+From foo@baz Thu Feb 23 21:13:19 CET 2017
+From: "David S. Miller" <davem@davemloft.net>
+Date: Fri, 17 Feb 2017 16:19:39 -0500
+Subject: irda: Fix lockdep annotations in hashbin_delete().
+
+From: "David S. Miller" <davem@davemloft.net>
+
+
+[ Upstream commit 4c03b862b12f980456f9de92db6d508a4999b788 ]
+
+A nested lock depth was added to the hashbin_delete() code but it
+doesn't actually work so well and results in tons of lockdep splats.
+
+Fix the code instead to properly drop the lock around the operation
+and just keep peeking the head of the hashbin queue.
+
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Tested-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/irda/irqueue.c | 34 ++++++++++++++++------------------
+ 1 file changed, 16 insertions(+), 18 deletions(-)
+
+--- a/net/irda/irqueue.c
++++ b/net/irda/irqueue.c
+@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
+ * for deallocating this structure if it's complex. If not the user can
+ * just supply kfree, which should take care of the job.
+ */
+-#ifdef CONFIG_LOCKDEP
+-static int hashbin_lock_depth = 0;
+-#endif
+ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ {
+ irda_queue_t* queue;
+@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin,
+ IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
+
+ /* Synchronize */
+- if ( hashbin->hb_type & HB_LOCK ) {
+- spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
+- hashbin_lock_depth++);
+- }
++ if (hashbin->hb_type & HB_LOCK)
++ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+
+ /*
+ * Free the entries in the hashbin, TODO: use hashbin_clear when
+ * it has been shown to work
+ */
+ for (i = 0; i < HASHBIN_SIZE; i ++ ) {
+- queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+- while (queue ) {
+- if (free_func)
+- (*free_func)(queue);
+- queue = dequeue_first(
+- (irda_queue_t**) &hashbin->hb_queue[i]);
++ while (1) {
++ queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
++
++ if (!queue)
++ break;
++
++ if (free_func) {
++ if (hashbin->hb_type & HB_LOCK)
++ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
++ free_func(queue);
++ if (hashbin->hb_type & HB_LOCK)
++ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
++ }
+ }
+ }
+
+@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin,
+ hashbin->magic = ~HB_MAGIC;
+
+ /* Release lock */
+- if ( hashbin->hb_type & HB_LOCK) {
++ if (hashbin->hb_type & HB_LOCK)
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+-#ifdef CONFIG_LOCKDEP
+- hashbin_lock_depth--;
+-#endif
+- }
+
+ /*
+ * Free the hashbin structure
--- /dev/null
+From foo@baz Thu Feb 23 21:13:19 CET 2017
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 12 Feb 2017 14:03:52 -0800
+Subject: net/llc: avoid BUG_ON() in skb_orphan()
+
+From: Eric Dumazet <edumazet@google.com>
+
+
+[ Upstream commit 8b74d439e1697110c5e5c600643e823eb1dd0762 ]
+
+It seems nobody used LLC since linux-3.12.
+
+Fortunately fuzzers like syzkaller still know how to run this code,
+otherwise it would be no fun.
+
+Setting skb->sk without skb->destructor leads to all kinds of
+bugs, we now prefer to be very strict about it.
+
+Ideally here we would use skb_set_owner() but this helper does not exist yet,
+only CAN seems to have a private helper for that.
+
+Fixes: 376c7311bdb6 ("net: add a temporary sanity check in skb_orphan()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Andrey Konovalov <andreyknvl@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/llc/llc_conn.c | 3 +++
+ net/llc/llc_sap.c | 3 +++
+ 2 files changed, 6 insertions(+)
+
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sa
+ * another trick required to cope with how the PROCOM state
+ * machine works. -acme
+ */
++ skb_orphan(skb);
++ sock_hold(sk);
+ skb->sk = sk;
++ skb->destructor = sock_efree;
+ }
+ if (!sock_owned_by_user(sk))
+ llc_conn_rcv(sk, skb);
+--- a/net/llc/llc_sap.c
++++ b/net/llc/llc_sap.c
+@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *
+
+ ev->type = LLC_SAP_EV_TYPE_PDU;
+ ev->reason = 0;
++ skb_orphan(skb);
++ sock_hold(sk);
+ skb->sk = sk;
++ skb->destructor = sock_efree;
+ llc_sap_state_process(sap, skb);
+ }
+
--- /dev/null
+From foo@baz Thu Feb 23 21:13:19 CET 2017
+From: Maxime Jayat <maxime.jayat@mobile-devices.fr>
+Date: Tue, 21 Feb 2017 18:35:51 +0100
+Subject: net: socket: fix recvmmsg not returning error from sock_error
+
+From: Maxime Jayat <maxime.jayat@mobile-devices.fr>
+
+
+[ Upstream commit e623a9e9dec29ae811d11f83d0074ba254aba374 ]
+
+Commit 34b88a68f26a ("net: Fix use after free in the recvmmsg exit path"),
+changed the exit path of recvmmsg to always return the datagrams
+variable and modified the error paths to set the variable to the error
+code returned by recvmsg if necessary.
+
+However in the case sock_error returned an error, the error code was
+then ignored, and recvmmsg returned 0.
+
+Change the error path of recvmmsg to correctly return the error code
+of sock_error.
+
+The bug was triggered by using recvmmsg on a CAN interface which was
+not up. Linux 4.6 and later return 0 in this case while earlier
+releases returned -ENETDOWN.
+
+Fixes: 34b88a68f26a ("net: Fix use after free in the recvmmsg exit path")
+Signed-off-by: Maxime Jayat <maxime.jayat@mobile-devices.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/socket.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2185,8 +2185,10 @@ int __sys_recvmmsg(int fd, struct mmsghd
+ return err;
+
+ err = sock_error(sock->sk);
+- if (err)
++ if (err) {
++ datagrams = err;
+ goto out_put;
++ }
+
+ entry = mmsg;
+ compat_entry = (struct compat_mmsghdr __user *)mmsg;
--- /dev/null
+From foo@baz Thu Feb 23 21:13:19 CET 2017
+From: Anoob Soman <anoob.soman@citrix.com>
+Date: Wed, 15 Feb 2017 20:25:39 +0000
+Subject: packet: Do not call fanout_release from atomic contexts
+
+From: Anoob Soman <anoob.soman@citrix.com>
+
+
+[ Upstream commit 2bd624b4611ffee36422782d16e1c944d1351e98 ]
+
+Commit 6664498280cf ("packet: call fanout_release, while UNREGISTERING a
+netdev"), unfortunately, introduced the following issues.
+
+1. calling mutex_lock(&fanout_mutex) (fanout_release()) from inside
+rcu_read-side critical section. rcu_read_lock disables preemption, most often,
+which prohibits calling sleeping functions.
+
+[ ] include/linux/rcupdate.h:560 Illegal context switch in RCU read-side critical section!
+[ ]
+[ ] rcu_scheduler_active = 1, debug_locks = 0
+[ ] 4 locks held by ovs-vswitchd/1969:
+[ ] #0: (cb_lock){++++++}, at: [<ffffffff8158a6c9>] genl_rcv+0x19/0x40
+[ ] #1: (ovs_mutex){+.+.+.}, at: [<ffffffffa04878ca>] ovs_vport_cmd_del+0x4a/0x100 [openvswitch]
+[ ] #2: (rtnl_mutex){+.+.+.}, at: [<ffffffff81564157>] rtnl_lock+0x17/0x20
+[ ] #3: (rcu_read_lock){......}, at: [<ffffffff81614165>] packet_notifier+0x5/0x3f0
+[ ]
+[ ] Call Trace:
+[ ] [<ffffffff813770c1>] dump_stack+0x85/0xc4
+[ ] [<ffffffff810c9077>] lockdep_rcu_suspicious+0x107/0x110
+[ ] [<ffffffff810a2da7>] ___might_sleep+0x57/0x210
+[ ] [<ffffffff810a2fd0>] __might_sleep+0x70/0x90
+[ ] [<ffffffff8162e80c>] mutex_lock_nested+0x3c/0x3a0
+[ ] [<ffffffff810de93f>] ? vprintk_default+0x1f/0x30
+[ ] [<ffffffff81186e88>] ? printk+0x4d/0x4f
+[ ] [<ffffffff816106dd>] fanout_release+0x1d/0xe0
+[ ] [<ffffffff81614459>] packet_notifier+0x2f9/0x3f0
+
+2. calling mutex_lock(&fanout_mutex) inside spin_lock(&po->bind_lock).
+"sleeping function called from invalid context"
+
+[ ] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:620
+[ ] in_atomic(): 1, irqs_disabled(): 0, pid: 1969, name: ovs-vswitchd
+[ ] INFO: lockdep is turned off.
+[ ] Call Trace:
+[ ] [<ffffffff813770c1>] dump_stack+0x85/0xc4
+[ ] [<ffffffff810a2f52>] ___might_sleep+0x202/0x210
+[ ] [<ffffffff810a2fd0>] __might_sleep+0x70/0x90
+[ ] [<ffffffff8162e80c>] mutex_lock_nested+0x3c/0x3a0
+[ ] [<ffffffff816106dd>] fanout_release+0x1d/0xe0
+[ ] [<ffffffff81614459>] packet_notifier+0x2f9/0x3f0
+
+3. calling dev_remove_pack(&fanout->prot_hook), from inside
+spin_lock(&po->bind_lock) or rcu_read-side critical-section. dev_remove_pack()
+-> synchronize_net(), which might sleep.
+
+[ ] BUG: scheduling while atomic: ovs-vswitchd/1969/0x00000002
+[ ] INFO: lockdep is turned off.
+[ ] Call Trace:
+[ ] [<ffffffff813770c1>] dump_stack+0x85/0xc4
+[ ] [<ffffffff81186274>] __schedule_bug+0x64/0x73
+[ ] [<ffffffff8162b8cb>] __schedule+0x6b/0xd10
+[ ] [<ffffffff8162c5db>] schedule+0x6b/0x80
+[ ] [<ffffffff81630b1d>] schedule_timeout+0x38d/0x410
+[ ] [<ffffffff810ea3fd>] synchronize_sched_expedited+0x53d/0x810
+[ ] [<ffffffff810ea6de>] synchronize_rcu_expedited+0xe/0x10
+[ ] [<ffffffff8154eab5>] synchronize_net+0x35/0x50
+[ ] [<ffffffff8154eae3>] dev_remove_pack+0x13/0x20
+[ ] [<ffffffff8161077e>] fanout_release+0xbe/0xe0
+[ ] [<ffffffff81614459>] packet_notifier+0x2f9/0x3f0
+
+4. fanout_release() races with calls from different CPU.
+
+To fix the above problems, remove the call to fanout_release() under
+rcu_read_lock(). Instead, call __dev_remove_pack(&fanout->prot_hook) and
+netdev_run_todo will be happy that &dev->ptype_specific list is empty. In order
+to achieve this, I moved dev_{add,remove}_pack() out of fanout_{add,release} to
+__fanout_{link,unlink}. So, call to {,__}unregister_prot_hook() will make sure
+fanout->prot_hook is removed as well.
+
+Fixes: 6664498280cf ("packet: call fanout_release, while UNREGISTERING a netdev")
+Reported-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Anoob Soman <anoob.soman@citrix.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 31 ++++++++++++++++++++++---------
+ 1 file changed, 22 insertions(+), 9 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1497,6 +1497,8 @@ static void __fanout_link(struct sock *s
+ f->arr[f->num_members] = sk;
+ smp_wmb();
+ f->num_members++;
++ if (f->num_members == 1)
++ dev_add_pack(&f->prot_hook);
+ spin_unlock(&f->lock);
+ }
+
+@@ -1513,6 +1515,8 @@ static void __fanout_unlink(struct sock
+ BUG_ON(i >= f->num_members);
+ f->arr[i] = f->arr[f->num_members - 1];
+ f->num_members--;
++ if (f->num_members == 0)
++ __dev_remove_pack(&f->prot_hook);
+ spin_unlock(&f->lock);
+ }
+
+@@ -1697,7 +1701,6 @@ static int fanout_add(struct sock *sk, u
+ match->prot_hook.func = packet_rcv_fanout;
+ match->prot_hook.af_packet_priv = match;
+ match->prot_hook.id_match = match_fanout_group;
+- dev_add_pack(&match->prot_hook);
+ list_add(&match->list, &fanout_list);
+ }
+ err = -EINVAL;
+@@ -1722,7 +1725,12 @@ out:
+ return err;
+ }
+
+-static void fanout_release(struct sock *sk)
++/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
++ * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
++ * It is the responsibility of the caller to call fanout_release_data() and
++ * free the returned packet_fanout (after synchronize_net())
++ */
++static struct packet_fanout *fanout_release(struct sock *sk)
+ {
+ struct packet_sock *po = pkt_sk(sk);
+ struct packet_fanout *f;
+@@ -1732,17 +1740,17 @@ static void fanout_release(struct sock *
+ if (f) {
+ po->fanout = NULL;
+
+- if (atomic_dec_and_test(&f->sk_ref)) {
++ if (atomic_dec_and_test(&f->sk_ref))
+ list_del(&f->list);
+- dev_remove_pack(&f->prot_hook);
+- fanout_release_data(f);
+- kfree(f);
+- }
++ else
++ f = NULL;
+
+ if (po->rollover)
+ kfree_rcu(po->rollover, rcu);
+ }
+ mutex_unlock(&fanout_mutex);
++
++ return f;
+ }
+
+ static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
+@@ -2851,6 +2859,7 @@ static int packet_release(struct socket
+ {
+ struct sock *sk = sock->sk;
+ struct packet_sock *po;
++ struct packet_fanout *f;
+ struct net *net;
+ union tpacket_req_u req_u;
+
+@@ -2890,9 +2899,14 @@ static int packet_release(struct socket
+ packet_set_ring(sk, &req_u, 1, 1);
+ }
+
+- fanout_release(sk);
++ f = fanout_release(sk);
+
+ synchronize_net();
++
++ if (f) {
++ fanout_release_data(f);
++ kfree(f);
++ }
+ /*
+ * Now the socket is dead. No more input will appear.
+ */
+@@ -3866,7 +3880,6 @@ static int packet_notifier(struct notifi
+ }
+ if (msg == NETDEV_UNREGISTER) {
+ packet_cached_dev_reset(po);
+- fanout_release(sk);
+ po->ifindex = -1;
+ if (po->prot_hook.dev)
+ dev_put(po->prot_hook.dev);
--- /dev/null
+From foo@baz Thu Feb 23 21:13:19 CET 2017
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 14 Feb 2017 09:03:51 -0800
+Subject: packet: fix races in fanout_add()
+
+From: Eric Dumazet <edumazet@google.com>
+
+
+[ Upstream commit d199fab63c11998a602205f7ee7ff7c05c97164b ]
+
+Multiple threads can call fanout_add() at the same time.
+
+We need to grab fanout_mutex earlier to avoid races that could
+lead to one thread freeing po->rollover that was set by another thread.
+
+Do the same in fanout_release(), for peace of mind, and to help us
+finding lockdep issues earlier.
+
+Fixes: dc99f600698d ("packet: Add fanout support.")
+Fixes: 0648ab70afe6 ("packet: rollover prepare: per-socket state")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 53 ++++++++++++++++++++++++++-----------------------
+ 1 file changed, 29 insertions(+), 24 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1623,6 +1623,7 @@ static void fanout_release_data(struct p
+
+ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ {
++ struct packet_rollover *rollover = NULL;
+ struct packet_sock *po = pkt_sk(sk);
+ struct packet_fanout *f, *match;
+ u8 type = type_flags & 0xff;
+@@ -1645,23 +1646,28 @@ static int fanout_add(struct sock *sk, u
+ return -EINVAL;
+ }
+
++ mutex_lock(&fanout_mutex);
++
++ err = -EINVAL;
+ if (!po->running)
+- return -EINVAL;
++ goto out;
+
++ err = -EALREADY;
+ if (po->fanout)
+- return -EALREADY;
++ goto out;
+
+ if (type == PACKET_FANOUT_ROLLOVER ||
+ (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
+- po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
+- if (!po->rollover)
+- return -ENOMEM;
+- atomic_long_set(&po->rollover->num, 0);
+- atomic_long_set(&po->rollover->num_huge, 0);
+- atomic_long_set(&po->rollover->num_failed, 0);
++ err = -ENOMEM;
++ rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
++ if (!rollover)
++ goto out;
++ atomic_long_set(&rollover->num, 0);
++ atomic_long_set(&rollover->num_huge, 0);
++ atomic_long_set(&rollover->num_failed, 0);
++ po->rollover = rollover;
+ }
+
+- mutex_lock(&fanout_mutex);
+ match = NULL;
+ list_for_each_entry(f, &fanout_list, list) {
+ if (f->id == id &&
+@@ -1708,11 +1714,11 @@ static int fanout_add(struct sock *sk, u
+ }
+ }
+ out:
+- mutex_unlock(&fanout_mutex);
+- if (err) {
+- kfree(po->rollover);
++ if (err && rollover) {
++ kfree(rollover);
+ po->rollover = NULL;
+ }
++ mutex_unlock(&fanout_mutex);
+ return err;
+ }
+
+@@ -1721,23 +1727,22 @@ static void fanout_release(struct sock *
+ struct packet_sock *po = pkt_sk(sk);
+ struct packet_fanout *f;
+
++ mutex_lock(&fanout_mutex);
+ f = po->fanout;
+- if (!f)
+- return;
++ if (f) {
++ po->fanout = NULL;
+
+- mutex_lock(&fanout_mutex);
+- po->fanout = NULL;
++ if (atomic_dec_and_test(&f->sk_ref)) {
++ list_del(&f->list);
++ dev_remove_pack(&f->prot_hook);
++ fanout_release_data(f);
++ kfree(f);
++ }
+
+- if (atomic_dec_and_test(&f->sk_ref)) {
+- list_del(&f->list);
+- dev_remove_pack(&f->prot_hook);
+- fanout_release_data(f);
+- kfree(f);
++ if (po->rollover)
++ kfree_rcu(po->rollover, rcu);
+ }
+ mutex_unlock(&fanout_mutex);
+-
+- if (po->rollover)
+- kfree_rcu(po->rollover, rcu);
+ }
+
+ static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
rtlwifi-rtl_usb-fix-missing-entry-in-usb-driver-s-private-data.patch
rtc-interface-ignore-expired-timers-when-enqueuing-new-timers.patch
blk-mq-really-fix-plug-list-flushing-for-nomerge-queues.patch
+net-llc-avoid-bug_on-in-skb_orphan.patch
+packet-fix-races-in-fanout_add.patch
+packet-do-not-call-fanout_release-from-atomic-contexts.patch
+dccp-fix-freeing-skb-too-early-for-ipv6_recvpktinfo.patch
+irda-fix-lockdep-annotations-in-hashbin_delete.patch
+ip-fix-ip_checksum-handling.patch
+net-socket-fix-recvmmsg-not-returning-error-from-sock_error.patch