--- /dev/null
+From foo@baz Mon 17 Aug 2020 11:42:16 AM CEST
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 13 Aug 2020 21:45:25 +0206
+Subject: af_packet: TPACKET_V3: fix fill status rwlock imbalance
+
+From: John Ogness <john.ogness@linutronix.de>
+
+[ Upstream commit 88fd1cb80daa20af063bce81e1fad14e945a8dc4 ]
+
+After @blk_fill_in_prog_lock is acquired there is an early out vnet
+situation that can occur. In that case, the rwlock needs to be
+released.
+
+Also, since @blk_fill_in_prog_lock is only acquired when @tp_version
+is exactly TPACKET_V3, only release it on that exact condition as
+well.
+
+And finally, add sparse annotation so that it is clearer that
+prb_fill_curr_block() and prb_clear_blk_fill_status() are acquiring
+and releasing @blk_fill_in_prog_lock, respectively. sparse is still
+unable to understand the balance, but the warnings are now on a
+higher level that make more sense.
+
+Fixes: 632ca50f2cbd ("af_packet: TPACKET_V3: replace busy-wait loop")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -949,6 +949,7 @@ static int prb_queue_frozen(struct tpack
+ }
+
+ static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
++ __releases(&pkc->blk_fill_in_prog_lock)
+ {
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
+ atomic_dec(&pkc->blk_fill_in_prog);
+@@ -996,6 +997,7 @@ static void prb_fill_curr_block(char *cu
+ struct tpacket_kbdq_core *pkc,
+ struct tpacket_block_desc *pbd,
+ unsigned int len)
++ __acquires(&pkc->blk_fill_in_prog_lock)
+ {
+ struct tpacket3_hdr *ppd;
+
+@@ -2272,8 +2274,11 @@ static int tpacket_rcv(struct sk_buff *s
+ if (do_vnet &&
+ virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ sizeof(struct virtio_net_hdr),
+- vio_le(), true, 0))
++ vio_le(), true, 0)) {
++ if (po->tp_version == TPACKET_V3)
++ prb_clear_blk_fill_status(&po->rx_ring);
+ goto drop_n_account;
++ }
+
+ if (po->tp_version <= TPACKET_V2) {
+ packet_increment_rx_head(po, &po->rx_ring);
+@@ -2379,7 +2384,7 @@ static int tpacket_rcv(struct sk_buff *s
+ __clear_bit(slot_id, po->rx_ring.rx_owner_map);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk);
+- } else {
++ } else if (po->tp_version == TPACKET_V3) {
+ prb_clear_blk_fill_status(&po->rx_ring);
+ }
+
--- /dev/null
+From foo@baz Mon 17 Aug 2020 11:42:16 AM CEST
+From: Xie He <xie.he.0141@gmail.com>
+Date: Wed, 5 Aug 2020 18:50:40 -0700
+Subject: drivers/net/wan/lapbether: Added needed_headroom and a skb->len check
+
+From: Xie He <xie.he.0141@gmail.com>
+
+[ Upstream commit c7ca03c216acb14466a713fedf1b9f2c24994ef2 ]
+
+1. Added a skb->len check
+
+This driver expects upper layers to include a pseudo header of 1 byte
+when passing down a skb for transmission. This driver will read this
+1-byte header. This patch added a skb->len check before reading the
+header to make sure the header exists.
+
+2. Changed to use needed_headroom instead of hard_header_len to request
+necessary headroom to be allocated
+
+In net/packet/af_packet.c, the function packet_snd first reserves a
+headroom of length (dev->hard_header_len + dev->needed_headroom).
+Then if the socket is a SOCK_DGRAM socket, it calls dev_hard_header,
+which calls dev->header_ops->create, to create the link layer header.
+If the socket is a SOCK_RAW socket, it "un-reserves" a headroom of
+length (dev->hard_header_len), and expects the user to provide the
+appropriate link layer header.
+
+So according to the logic of af_packet.c, dev->hard_header_len should
+be the length of the header that would be created by
+dev->header_ops->create.
+
+However, this driver doesn't provide dev->header_ops, so logically
+dev->hard_header_len should be 0.
+
+So we should use dev->needed_headroom instead of dev->hard_header_len
+to request necessary headroom to be allocated.
+
+This change fixes kernel panic when this driver is used with AF_PACKET
+SOCK_RAW sockets.
+
+Call stack when panic:
+
+[ 168.399197] skbuff: skb_under_panic: text:ffffffff819d95fb len:20
+put:14 head:ffff8882704c0a00 data:ffff8882704c09fd tail:0x11 end:0xc0
+dev:veth0
+...
+[ 168.399255] Call Trace:
+[ 168.399259] skb_push.cold+0x14/0x24
+[ 168.399262] eth_header+0x2b/0xc0
+[ 168.399267] lapbeth_data_transmit+0x9a/0xb0 [lapbether]
+[ 168.399275] lapb_data_transmit+0x22/0x2c [lapb]
+[ 168.399277] lapb_transmit_buffer+0x71/0xb0 [lapb]
+[ 168.399279] lapb_kick+0xe3/0x1c0 [lapb]
+[ 168.399281] lapb_data_request+0x76/0xc0 [lapb]
+[ 168.399283] lapbeth_xmit+0x56/0x90 [lapbether]
+[ 168.399286] dev_hard_start_xmit+0x91/0x1f0
+[ 168.399289] ? irq_init_percpu_irqstack+0xc0/0x100
+[ 168.399291] __dev_queue_xmit+0x721/0x8e0
+[ 168.399295] ? packet_parse_headers.isra.0+0xd2/0x110
+[ 168.399297] dev_queue_xmit+0x10/0x20
+[ 168.399298] packet_sendmsg+0xbf0/0x19b0
+......
+
+Cc: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Cc: Martin Schiller <ms@dev.tdt.de>
+Cc: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Xie He <xie.he.0141@gmail.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wan/lapbether.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -160,6 +160,12 @@ static netdev_tx_t lapbeth_xmit(struct s
+ if (!netif_running(dev))
+ goto drop;
+
++ /* There should be a pseudo header of 1 byte added by upper layers.
++ * Check to make sure it is there before reading it.
++ */
++ if (skb->len < 1)
++ goto drop;
++
+ switch (skb->data[0]) {
+ case X25_IFACE_DATA:
+ break;
+@@ -308,6 +314,7 @@ static void lapbeth_setup(struct net_dev
+ dev->netdev_ops = &lapbeth_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->type = ARPHRD_X25;
++ dev->hard_header_len = 0;
+ dev->mtu = 1000;
+ dev->addr_len = 0;
+ }
+@@ -334,7 +341,8 @@ static int lapbeth_new_device(struct net
+ * then this driver prepends a length field of 2 bytes,
+ * then the underlying Ethernet device prepends its own header.
+ */
+- ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
++ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
++ + dev->needed_headroom;
+
+ lapbeth = netdev_priv(ndev);
+ lapbeth->axdev = ndev;
--- /dev/null
+From foo@baz Mon 17 Aug 2020 11:42:16 AM CEST
+From: Tim Froidcoeur <tim.froidcoeur@tessares.net>
+Date: Tue, 11 Aug 2020 20:33:24 +0200
+Subject: net: initialize fastreuse on inet_inherit_port
+
+From: Tim Froidcoeur <tim.froidcoeur@tessares.net>
+
+[ Upstream commit d76f3351cea2d927fdf70dd7c06898235035e84e ]
+
+In the case of TPROXY, bind_conflict optimizations for SO_REUSEADDR or
+SO_REUSEPORT are broken, possibly resulting in O(n) instead of O(1) bind
+behaviour or in the incorrect reuse of a bind.
+
+The kernel keeps track, for each bind_bucket, of whether all sockets in
+the bind_bucket support SO_REUSEADDR or SO_REUSEPORT, via two fastreuse
+flags. These flags allow skipping the costly bind_conflict check when
+possible (meaning when all sockets have the proper SO_REUSE option).
+
+For every socket added to a bind_bucket, these flags need to be updated.
+As soon as a socket that does not support reuse is added, the flag is
+set to false and will never go back to true, unless the bind_bucket is
+deleted.
+
+Note that there is no mechanism to re-evaluate these flags when a socket
+is removed (this might make sense when removing a socket that would not
+allow reuse; this leaves room for a future patch).
+
+For this optimization to work, it is mandatory that these flags are
+properly initialized and updated.
+
+When a child socket is created from a listen socket in
+__inet_inherit_port, the TPROXY case could create a new bind bucket
+without properly initializing these flags, thus preventing the
+optimization from working. Alternatively, a socket not allowing reuse could
+be added to an existing bind bucket without updating the flags, causing
+bind_conflict to never be called as it should.
+
+Call inet_csk_update_fastreuse when __inet_inherit_port decides to create
+a new bind_bucket or use a different bind_bucket than the one of the
+listen socket.
+
+Fixes: 093d282321da ("tproxy: fix hash locking issue when using port redirection in __inet_inherit_port()")
+Acked-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Tim Froidcoeur <tim.froidcoeur@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -161,6 +161,7 @@ int __inet_inherit_port(const struct soc
+ return -ENOMEM;
+ }
+ }
++ inet_csk_update_fastreuse(tb, child);
+ }
+ inet_bind_hash(child, tb, port);
+ spin_unlock(&head->lock);
--- /dev/null
+From foo@baz Mon 17 Aug 2020 11:42:16 AM CEST
+From: Qingyu Li <ieatmuttonchuan@gmail.com>
+Date: Mon, 10 Aug 2020 09:51:00 +0800
+Subject: net/nfc/rawsock.c: add CAP_NET_RAW check.
+
+From: Qingyu Li <ieatmuttonchuan@gmail.com>
+
+[ Upstream commit 26896f01467a28651f7a536143fe5ac8449d4041 ]
+
+When creating a raw AF_NFC socket, CAP_NET_RAW needs to be checked first.
+
+Signed-off-by: Qingyu Li <ieatmuttonchuan@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/nfc/rawsock.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -344,10 +344,13 @@ static int rawsock_create(struct net *ne
+ if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
+ return -ESOCKTNOSUPPORT;
+
+- if (sock->type == SOCK_RAW)
++ if (sock->type == SOCK_RAW) {
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+ sock->ops = &rawsock_raw_ops;
+- else
++ } else {
+ sock->ops = &rawsock_ops;
++ }
+
+ sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
+ if (!sk)
--- /dev/null
+From foo@baz Mon 17 Aug 2020 11:42:16 AM CEST
+From: Tim Froidcoeur <tim.froidcoeur@tessares.net>
+Date: Tue, 11 Aug 2020 20:33:23 +0200
+Subject: net: refactor bind_bucket fastreuse into helper
+
+From: Tim Froidcoeur <tim.froidcoeur@tessares.net>
+
+[ Upstream commit 62ffc589abb176821662efc4525ee4ac0b9c3894 ]
+
+Refactor the fastreuse update code in inet_csk_get_port into a small
+helper function that can be called from other places.
+
+Acked-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Tim Froidcoeur <tim.froidcoeur@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_connection_sock.h | 4 +
+ net/ipv4/inet_connection_sock.c | 93 ++++++++++++++++++++-----------------
+ 2 files changed, 55 insertions(+), 42 deletions(-)
+
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -313,5 +313,9 @@ int inet_csk_compat_getsockopt(struct so
+ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+
++/* update the fast reuse flag when adding a socket */
++void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
++ struct sock *sk);
++
+ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+ #endif /* _INET_CONNECTION_SOCK_H */
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -285,51 +285,12 @@ static inline int sk_reuseport_match(str
+ ipv6_only_sock(sk), true, false);
+ }
+
+-/* Obtain a reference to a local port for the given sock,
+- * if snum is zero it means select any available local port.
+- * We try to allocate an odd port (and leave even ports for connect())
+- */
+-int inet_csk_get_port(struct sock *sk, unsigned short snum)
++void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
++ struct sock *sk)
+ {
+- bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+- struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
+- int ret = 1, port = snum;
+- struct inet_bind_hashbucket *head;
+- struct net *net = sock_net(sk);
+- struct inet_bind_bucket *tb = NULL;
+ kuid_t uid = sock_i_uid(sk);
++ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+
+- if (!port) {
+- head = inet_csk_find_open_port(sk, &tb, &port);
+- if (!head)
+- return ret;
+- if (!tb)
+- goto tb_not_found;
+- goto success;
+- }
+- head = &hinfo->bhash[inet_bhashfn(net, port,
+- hinfo->bhash_size)];
+- spin_lock_bh(&head->lock);
+- inet_bind_bucket_for_each(tb, &head->chain)
+- if (net_eq(ib_net(tb), net) && tb->port == port)
+- goto tb_found;
+-tb_not_found:
+- tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
+- net, head, port);
+- if (!tb)
+- goto fail_unlock;
+-tb_found:
+- if (!hlist_empty(&tb->owners)) {
+- if (sk->sk_reuse == SK_FORCE_REUSE)
+- goto success;
+-
+- if ((tb->fastreuse > 0 && reuse) ||
+- sk_reuseport_match(tb, sk))
+- goto success;
+- if (inet_csk_bind_conflict(sk, tb, true, true))
+- goto fail_unlock;
+- }
+-success:
+ if (hlist_empty(&tb->owners)) {
+ tb->fastreuse = reuse;
+ if (sk->sk_reuseport) {
+@@ -373,6 +334,54 @@ success:
+ tb->fastreuseport = 0;
+ }
+ }
++}
++
++/* Obtain a reference to a local port for the given sock,
++ * if snum is zero it means select any available local port.
++ * We try to allocate an odd port (and leave even ports for connect())
++ */
++int inet_csk_get_port(struct sock *sk, unsigned short snum)
++{
++ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
++ struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
++ int ret = 1, port = snum;
++ struct inet_bind_hashbucket *head;
++ struct net *net = sock_net(sk);
++ struct inet_bind_bucket *tb = NULL;
++
++ if (!port) {
++ head = inet_csk_find_open_port(sk, &tb, &port);
++ if (!head)
++ return ret;
++ if (!tb)
++ goto tb_not_found;
++ goto success;
++ }
++ head = &hinfo->bhash[inet_bhashfn(net, port,
++ hinfo->bhash_size)];
++ spin_lock_bh(&head->lock);
++ inet_bind_bucket_for_each(tb, &head->chain)
++ if (net_eq(ib_net(tb), net) && tb->port == port)
++ goto tb_found;
++tb_not_found:
++ tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
++ net, head, port);
++ if (!tb)
++ goto fail_unlock;
++tb_found:
++ if (!hlist_empty(&tb->owners)) {
++ if (sk->sk_reuse == SK_FORCE_REUSE)
++ goto success;
++
++ if ((tb->fastreuse > 0 && reuse) ||
++ sk_reuseport_match(tb, sk))
++ goto success;
++ if (inet_csk_bind_conflict(sk, tb, true, true))
++ goto fail_unlock;
++ }
++success:
++ inet_csk_update_fastreuse(tb, sk);
++
+ if (!inet_csk(sk)->icsk_bind_hash)
+ inet_bind_hash(sk, tb, port);
+ WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
--- /dev/null
+From foo@baz Mon 17 Aug 2020 11:42:16 AM CEST
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Thu, 6 Aug 2020 19:53:16 +0800
+Subject: net: Set fput_needed iff FDPUT_FPUT is set
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+[ Upstream commit ce787a5a074a86f76f5d3fd804fa78e01bfb9e89 ]
+
+We should fput() file iff FDPUT_FPUT is set. So we should set fput_needed
+accordingly.
+
+Fixes: 00e188ef6a7e ("sockfd_lookup_light(): switch to fdget^W^Waway from fget_light")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/socket.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -474,7 +474,7 @@ static struct socket *sockfd_lookup_ligh
+ if (f.file) {
+ sock = sock_from_file(f.file, err);
+ if (likely(sock)) {
+- *fput_needed = f.flags;
++ *fput_needed = f.flags & FDPUT_FPUT;
+ return sock;
+ }
+ fdput(f);
--- /dev/null
+From foo@baz Mon 17 Aug 2020 11:42:16 AM CEST
+From: Ira Weiny <ira.weiny@intel.com>
+Date: Mon, 10 Aug 2020 17:02:58 -0700
+Subject: net/tls: Fix kmap usage
+
+From: Ira Weiny <ira.weiny@intel.com>
+
+[ Upstream commit b06c19d9f827f6743122795570bfc0c72db482b0 ]
+
+When MSG_OOB is specified to tls_device_sendpage() the mapped page is
+never unmapped.
+
+Hold off mapping the page until after the flags are checked and the page
+is actually needed.
+
+Fixes: e8f69799810c ("net/tls: Add generic NIC offload infrastructure")
+Signed-off-by: Ira Weiny <ira.weiny@intel.com>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_device.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -476,7 +476,7 @@ int tls_device_sendpage(struct sock *sk,
+ int offset, size_t size, int flags)
+ {
+ struct iov_iter msg_iter;
+- char *kaddr = kmap(page);
++ char *kaddr;
+ struct kvec iov;
+ int rc;
+
+@@ -490,6 +490,7 @@ int tls_device_sendpage(struct sock *sk,
+ goto out;
+ }
+
++ kaddr = kmap(page);
+ iov.iov_base = kaddr + offset;
+ iov.iov_len = size;
+ iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
svcrdma-fix-page-leak-in-svc_rdma_recv_read_chunk.patch
x86-fsgsbase-64-fix-null-deref-in-86_fsgsbase_read_t.patch
crypto-aesni-add-compatibility-with-ias.patch
+af_packet-tpacket_v3-fix-fill-status-rwlock-imbalance.patch
+drivers-net-wan-lapbether-added-needed_headroom-and-a-skb-len-check.patch
+net-nfc-rawsock.c-add-cap_net_raw-check.patch
+net-set-fput_needed-iff-fdput_fput-is-set.patch
+net-tls-fix-kmap-usage.patch
+net-refactor-bind_bucket-fastreuse-into-helper.patch
+net-initialize-fastreuse-on-inet_inherit_port.patch