--- /dev/null
+From 64e759f58f128730b97a3c3a26d283c075ad7c86 Mon Sep 17 00:00:00 2001
+From: Szymon Janc <szymon.janc@codecoup.pl>
+Date: Mon, 26 Feb 2018 15:41:53 +0100
+Subject: Bluetooth: Fix missing encryption refresh on Security Request
+
+From: Szymon Janc <szymon.janc@codecoup.pl>
+
+commit 64e759f58f128730b97a3c3a26d283c075ad7c86 upstream.
+
+If a Security Request is received on a connection that is already
+encrypted with sufficient security, the master should perform the
+encryption key refresh procedure instead of just ignoring the Slave
+Security Request (Core Spec 5.0 Vol 3 Part H 2.4.6).
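+
+A minimal userspace model of that rule (illustrative only; the names
+and numeric levels are hypothetical, not the kernel's SMP API):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum action { START_PAIRING, REFRESH_KEY };
+
+  static enum action on_security_request(bool encrypted,
+                                         int cur_level, int req_level)
+  {
+          /* already encrypted at >= the requested level: refresh the
+           * key (before the fix this request was silently ignored) */
+          if (encrypted && cur_level >= req_level)
+                  return REFRESH_KEY;
+          return START_PAIRING;
+  }
+
+  int main(void)
+  {
+          printf("%d\n", on_security_request(true, 2, 2)); /* 1 */
+          return 0;
+  }
+
+The btmon trace below shows the fixed behaviour: the Security Request
+is answered with LE Start Encryption and a key refresh completes.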
+
+> ACL Data RX: Handle 3585 flags 0x02 dlen 6
+ SMP: Security Request (0x0b) len 1
+ Authentication requirement: Bonding, No MITM, SC, No Keypresses (0x09)
+< HCI Command: LE Start Encryption (0x08|0x0019) plen 28
+ Handle: 3585
+ Random number: 0x0000000000000000
+ Encrypted diversifier: 0x0000
+ Long term key: 44264272a5c426a9e868f034cf0e69f3
+> HCI Event: Command Status (0x0f) plen 4
+ LE Start Encryption (0x08|0x0019) ncmd 1
+ Status: Success (0x00)
+> HCI Event: Encryption Key Refresh Complete (0x30) plen 3
+ Status: Success (0x00)
+ Handle: 3585
+
+Signed-off-by: Szymon Janc <szymon.janc@codecoup.pl>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/smp.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2287,8 +2287,14 @@ static u8 smp_cmd_security_req(struct l2
+ else
+ sec_level = authreq_to_seclevel(auth);
+
+- if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
++ if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
++ /* If link is already encrypted with sufficient security we
++ * still need refresh encryption as per Core Spec 5.0 Vol 3,
++ * Part H 2.4.6
++ */
++ smp_ltk_encrypt(conn, hcon->sec_level);
+ return 0;
++ }
+
+ if (sec_level > hcon->pending_sec_level)
+ hcon->pending_sec_level = sec_level;
--- /dev/null
+From b954f94023dcc61388c8384f0f14eb8e42c863c5 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Mon, 12 Mar 2018 14:54:24 +0100
+Subject: l2tp: fix races with ipv4-mapped ipv6 addresses
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit b954f94023dcc61388c8384f0f14eb8e42c863c5 upstream.
+
+The l2tp_tunnel_create() function checks for v4mapped ipv6
+sockets and caches that flag, so that the l2tp core code can
+reuse it at xmit time.
+
+If the socket is provided by userspace, the connection status of
+the tunnel socket can change between tunnel creation and the xmit
+call, which lets syzbot trigger the following splat:
+
+BUG: KASAN: use-after-free in ip6_dst_idev include/net/ip6_fib.h:192
+[inline]
+BUG: KASAN: use-after-free in ip6_xmit+0x1f76/0x2260
+net/ipv6/ip6_output.c:264
+Read of size 8 at addr ffff8801bd949318 by task syz-executor4/23448
+
+CPU: 0 PID: 23448 Comm: syz-executor4 Not tainted 4.16.0-rc4+ #65
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:17 [inline]
+ dump_stack+0x194/0x24d lib/dump_stack.c:53
+ print_address_description+0x73/0x250 mm/kasan/report.c:256
+ kasan_report_error mm/kasan/report.c:354 [inline]
+ kasan_report+0x23c/0x360 mm/kasan/report.c:412
+ __asan_report_load8_noabort+0x14/0x20 mm/kasan/report.c:433
+ ip6_dst_idev include/net/ip6_fib.h:192 [inline]
+ ip6_xmit+0x1f76/0x2260 net/ipv6/ip6_output.c:264
+ inet6_csk_xmit+0x2fc/0x580 net/ipv6/inet6_connection_sock.c:139
+ l2tp_xmit_core net/l2tp/l2tp_core.c:1053 [inline]
+ l2tp_xmit_skb+0x105f/0x1410 net/l2tp/l2tp_core.c:1148
+ pppol2tp_sendmsg+0x470/0x670 net/l2tp/l2tp_ppp.c:341
+ sock_sendmsg_nosec net/socket.c:630 [inline]
+ sock_sendmsg+0xca/0x110 net/socket.c:640
+ ___sys_sendmsg+0x767/0x8b0 net/socket.c:2046
+ __sys_sendmsg+0xe5/0x210 net/socket.c:2080
+ SYSC_sendmsg net/socket.c:2091 [inline]
+ SyS_sendmsg+0x2d/0x50 net/socket.c:2087
+ do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x42/0xb7
+RIP: 0033:0x453e69
+RSP: 002b:00007f819593cc68 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+RAX: ffffffffffffffda RBX: 00007f819593d6d4 RCX: 0000000000453e69
+RDX: 0000000000000081 RSI: 000000002037ffc8 RDI: 0000000000000004
+RBP: 000000000072bea0 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
+R13: 00000000000004c3 R14: 00000000006f72e8 R15: 0000000000000000
+
+This change addresses the issue by:
+* explicitly checking for TCP_ESTABLISHED for userspace-provided sockets
+* dropping the v4mapped flag usage - it can become outdated - and
+  explicitly invoking ipv6_addr_v4mapped() instead
+
+The issue is apparently there since ancient times.
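+
+A userspace analogue of the new per-packet check (sketch; it uses the
+standard IN6_IS_ADDR_V4MAPPED macro, the userspace counterpart of the
+kernel's ipv6_addr_v4mapped()):
+
+  #include <netinet/in.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* decide at xmit time whether the peer is a "real" v6 address;
+   * nothing is cached, so a later reconnect cannot leave the flag
+   * stale the way tunnel->v4mapped could */
+  static bool peer_is_v6(int family, const struct in6_addr *daddr)
+  {
+          return family == AF_INET6 && !IN6_IS_ADDR_V4MAPPED(daddr);
+  }
+
+  int main(void)
+  {
+          struct in6_addr mapped = IN6ADDR_ANY_INIT;
+
+          mapped.s6_addr[10] = 0xff;
+          mapped.s6_addr[11] = 0xff;   /* ::ffff:0.0.0.0 prefix */
+          printf("%d\n", peer_is_v6(AF_INET6, &mapped));  /* 0 */
+          return 0;
+  }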
+
+v1 -> v2: (many thanks to Guillaume)
+ - fix csum issue introduced in v1
+ - replace pr_err with pr_debug
+ - fix build issue with IPV6 disabled
+ - move l2tp_sk_is_v4mapped in l2tp_core.c
+
+v2 -> v3:
+ - don't update inet_daddr for v4mapped address, unneeded
+ - drop redundant check at creation time
+
+Reported-and-tested-by: syzbot+92fa328176eb07e4ac1a@syzkaller.appspotmail.com
+Fixes: 3557baabf280 ("[L2TP]: PPP over L2TP driver core")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/l2tp/l2tp_core.c | 38 ++++++++++++++++++--------------------
+ net/l2tp/l2tp_core.h | 3 ---
+ 2 files changed, 18 insertions(+), 23 deletions(-)
+
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -113,6 +113,13 @@ struct l2tp_net {
+ spinlock_t l2tp_session_hlist_lock;
+ };
+
++#if IS_ENABLED(CONFIG_IPV6)
++static bool l2tp_sk_is_v6(struct sock *sk)
++{
++ return sk->sk_family == PF_INET6 &&
++ !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
++}
++#endif
+
+ static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+ {
+@@ -1130,7 +1137,7 @@ static int l2tp_xmit_core(struct l2tp_se
+ /* Queue the packet to IP for output */
+ skb->ignore_df = 1;
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
++ if (l2tp_sk_is_v6(tunnel->sock))
+ error = inet6_csk_xmit(tunnel->sock, skb, NULL);
+ else
+ #endif
+@@ -1193,6 +1200,15 @@ int l2tp_xmit_skb(struct l2tp_session *s
+ goto out_unlock;
+ }
+
++ /* The user-space may change the connection status for the user-space
++ * provided socket at run time: we must check it under the socket lock
++ */
++ if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
++ kfree_skb(skb);
++ ret = NET_XMIT_DROP;
++ goto out_unlock;
++ }
++
+ /* Get routing info from the tunnel socket */
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
+@@ -1212,7 +1228,7 @@ int l2tp_xmit_skb(struct l2tp_session *s
+
+ /* Calculate UDP checksum if configured to do so */
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
++ if (l2tp_sk_is_v6(sk))
+ udp6_set_csum(udp_get_no_check6_tx(sk),
+ skb, &inet6_sk(sk)->saddr,
+ &sk->sk_v6_daddr, udp_len);
+@@ -1616,24 +1632,6 @@ int l2tp_tunnel_create(struct net *net,
+ if (cfg != NULL)
+ tunnel->debug = cfg->debug;
+
+-#if IS_ENABLED(CONFIG_IPV6)
+- if (sk->sk_family == PF_INET6) {
+- struct ipv6_pinfo *np = inet6_sk(sk);
+-
+- if (ipv6_addr_v4mapped(&np->saddr) &&
+- ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
+- struct inet_sock *inet = inet_sk(sk);
+-
+- tunnel->v4mapped = true;
+- inet->inet_saddr = np->saddr.s6_addr32[3];
+- inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
+- inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
+- } else {
+- tunnel->v4mapped = false;
+- }
+- }
+-#endif
+-
+ /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ tunnel->encap = encap;
+ if (encap == L2TP_ENCAPTYPE_UDP) {
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -195,9 +195,6 @@ struct l2tp_tunnel {
+ struct sock *sock; /* Parent socket */
+ int fd; /* Parent fd, if tunnel socket
+ * was created by userspace */
+-#if IS_ENABLED(CONFIG_IPV6)
+- bool v4mapped;
+-#endif
+
+ struct work_struct del_work;
+
--- /dev/null
+From 0dcd7876029b58770f769cbb7b484e88e4a305e5 Mon Sep 17 00:00:00 2001
+From: Greg Hackmann <ghackmann@google.com>
+Date: Wed, 7 Mar 2018 14:42:53 -0800
+Subject: net: xfrm: use preempt-safe this_cpu_read() in ipcomp_alloc_tfms()
+
+From: Greg Hackmann <ghackmann@google.com>
+
+commit 0dcd7876029b58770f769cbb7b484e88e4a305e5 upstream.
+
+f7c83bcbfaf5 ("net: xfrm: use __this_cpu_read per-cpu helper") added a
+__this_cpu_read() call inside ipcomp_alloc_tfms().
+
+At the time, __this_cpu_read() required the caller to either not care
+about races or to handle preemption/interrupt issues. 3.15 tightened
+the rules around some per-cpu operations, and now __this_cpu_read()
+should never be used in a preemptible context. On 3.15 and later, we
+need to use this_cpu_read() instead.
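+
+The difference, as a kernel-style sketch (illustrative, not a
+standalone program):
+
+  /* __this_cpu_read(): the caller is responsible for preemption;
+   * with CONFIG_DEBUG_PREEMPT the BUG below fires if it is used in
+   * preemptible context */
+  preempt_disable();
+  tfm = __this_cpu_read(*pos->tfms);
+  preempt_enable();
+
+  /* this_cpu_read(): disables preemption around the access itself,
+   * so it is safe here - any valid CPU's tfm pointer is acceptable */
+  tfm = this_cpu_read(*pos->tfms);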
+
+syzkaller reported this leading to the following kernel BUG while
+fuzzing sendmsg:
+
+BUG: using __this_cpu_read() in preemptible [00000000] code: repro/3101
+caller is ipcomp_init_state+0x185/0x990
+CPU: 3 PID: 3101 Comm: repro Not tainted 4.16.0-rc4-00123-g86f84779d8e9 #154
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
+Call Trace:
+ dump_stack+0xb9/0x115
+ check_preemption_disabled+0x1cb/0x1f0
+ ipcomp_init_state+0x185/0x990
+ ? __xfrm_init_state+0x876/0xc20
+ ? lock_downgrade+0x5e0/0x5e0
+ ipcomp4_init_state+0xaa/0x7c0
+ __xfrm_init_state+0x3eb/0xc20
+ xfrm_init_state+0x19/0x60
+ pfkey_add+0x20df/0x36f0
+ ? pfkey_broadcast+0x3dd/0x600
+ ? pfkey_sock_destruct+0x340/0x340
+ ? pfkey_seq_stop+0x80/0x80
+ ? __skb_clone+0x236/0x750
+ ? kmem_cache_alloc+0x1f6/0x260
+ ? pfkey_sock_destruct+0x340/0x340
+ ? pfkey_process+0x62a/0x6f0
+ pfkey_process+0x62a/0x6f0
+ ? pfkey_send_new_mapping+0x11c0/0x11c0
+ ? mutex_lock_io_nested+0x1390/0x1390
+ pfkey_sendmsg+0x383/0x750
+ ? dump_sp+0x430/0x430
+ sock_sendmsg+0xc0/0x100
+ ___sys_sendmsg+0x6c8/0x8b0
+ ? copy_msghdr_from_user+0x3b0/0x3b0
+ ? pagevec_lru_move_fn+0x144/0x1f0
+ ? find_held_lock+0x32/0x1c0
+ ? do_huge_pmd_anonymous_page+0xc43/0x11e0
+ ? lock_downgrade+0x5e0/0x5e0
+ ? get_kernel_page+0xb0/0xb0
+ ? _raw_spin_unlock+0x29/0x40
+ ? do_huge_pmd_anonymous_page+0x400/0x11e0
+ ? __handle_mm_fault+0x553/0x2460
+ ? __fget_light+0x163/0x1f0
+ ? __sys_sendmsg+0xc7/0x170
+ __sys_sendmsg+0xc7/0x170
+ ? SyS_shutdown+0x1a0/0x1a0
+ ? __do_page_fault+0x5a0/0xca0
+ ? lock_downgrade+0x5e0/0x5e0
+ SyS_sendmsg+0x27/0x40
+ ? __sys_sendmsg+0x170/0x170
+ do_syscall_64+0x19f/0x640
+ entry_SYSCALL_64_after_hwframe+0x42/0xb7
+RIP: 0033:0x7f0ee73dfb79
+RSP: 002b:00007ffe14fc15a8 EFLAGS: 00000207 ORIG_RAX: 000000000000002e
+RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f0ee73dfb79
+RDX: 0000000000000000 RSI: 00000000208befc8 RDI: 0000000000000004
+RBP: 00007ffe14fc15b0 R08: 00007ffe14fc15c0 R09: 00007ffe14fc15c0
+R10: 0000000000000000 R11: 0000000000000207 R12: 0000000000400440
+R13: 00007ffe14fc16b0 R14: 0000000000000000 R15: 0000000000000000
+
+Signed-off-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_ipcomp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/xfrm/xfrm_ipcomp.c
++++ b/net/xfrm/xfrm_ipcomp.c
+@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ip
+ struct crypto_comp *tfm;
+
+ /* This can be any valid CPU ID so we don't need locking. */
+- tfm = __this_cpu_read(*pos->tfms);
++ tfm = this_cpu_read(*pos->tfms);
+
+ if (!strcmp(crypto_comp_name(tfm), alg_name)) {
+ pos->users++;
--- /dev/null
+From c8d70a700a5b486bfa8e5a7d33d805389f6e59f9 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Fri, 9 Mar 2018 14:27:31 +0100
+Subject: netfilter: bridge: ebt_among: add more missing match size checks
+
+From: Florian Westphal <fw@strlen.de>
+
+commit c8d70a700a5b486bfa8e5a7d33d805389f6e59f9 upstream.
+
+ebt_among is special: it has a dynamic match size and is exempt
+from the central size checks.
+
+commit c4585a2823edf ("bridge: ebt_among: add missing match size checks")
+added validation for the pool size, but missed the fact that the
+macros ebt_among_wh_src/dst can already return an out-of-bounds
+result because they do not check the value of wh_src/dst_ofs (an
+offset) against the size of the match that userspace gave to us.
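+
+A userspace harness for the bounds rule being added (sketch; the
+header/alignment/wormhash sizes are stand-in parameters, where the
+kernel checks against struct ebt_among_info and struct
+ebt_mac_wormhash):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* a non-zero offset must point past the fixed header, be aligned,
+   * and leave room for at least one wormhash header */
+  static bool offset_invalid(int off, unsigned int match_size,
+                             unsigned int hdr, unsigned int align,
+                             unsigned int wh)
+  {
+          if (off == 0)
+                  return false;                /* not present */
+          if (off < (int)hdr || off % align)
+                  return true;
+          return (unsigned int)off + wh > match_size;
+  }
+
+  int main(void)
+  {
+          printf("%d\n", offset_invalid(4, 256, 32, 8, 16));   /* 1: inside header */
+          printf("%d\n", offset_invalid(32, 256, 32, 8, 16));  /* 0: valid */
+          printf("%d\n", offset_invalid(248, 256, 32, 8, 16)); /* 1: out of bounds */
+          return 0;
+  }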
+
+v2:
+check that the offset has correct alignment.
+Paolo Abeni points out that we should also check that the src/dst
+wormhash arrays do not overlap, and that src + length lines up with
+the start of dst (or vice versa).
+v3: compact the wormhash_sizes_valid() part
+
+NB: the Fixes tag is intentionally wrong; this bug has existed from
+day one, when the match was added for the 2.6 kernel. The tag is
+there so that stable maintainers will notice this one too.
+
+Tested with same rules from the earlier patch.
+
+Fixes: c4585a2823edf ("bridge: ebt_among: add missing match size checks")
+Reported-by: <syzbot+bdabab6f1983a03fc009@syzkaller.appspotmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bridge/netfilter/ebt_among.c | 34 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 34 insertions(+)
+
+--- a/net/bridge/netfilter/ebt_among.c
++++ b/net/bridge/netfilter/ebt_among.c
+@@ -177,6 +177,28 @@ static bool poolsize_invalid(const struc
+ return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+ }
+
++static bool wormhash_offset_invalid(int off, unsigned int len)
++{
++ if (off == 0) /* not present */
++ return false;
++
++ if (off < (int)sizeof(struct ebt_among_info) ||
++ off % __alignof__(struct ebt_mac_wormhash))
++ return true;
++
++ off += sizeof(struct ebt_mac_wormhash);
++
++ return off > len;
++}
++
++static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
++{
++ if (a == 0)
++ a = sizeof(struct ebt_among_info);
++
++ return ebt_mac_wormhash_size(wh) + a == b;
++}
++
+ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
+ {
+ const struct ebt_among_info *info = par->matchinfo;
+@@ -189,6 +211,10 @@ static int ebt_among_mt_check(const stru
+ if (expected_length > em->match_size)
+ return -EINVAL;
+
++ if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
++ wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
++ return -EINVAL;
++
+ wh_dst = ebt_among_wh_dst(info);
+ if (poolsize_invalid(wh_dst))
+ return -EINVAL;
+@@ -201,6 +227,14 @@ static int ebt_among_mt_check(const stru
+ if (poolsize_invalid(wh_src))
+ return -EINVAL;
+
++ if (info->wh_src_ofs < info->wh_dst_ofs) {
++ if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
++ return -EINVAL;
++ } else {
++ if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
++ return -EINVAL;
++ }
++
+ expected_length += ebt_mac_wormhash_size(wh_src);
+
+ if (em->match_size != EBT_ALIGN(expected_length)) {
--- /dev/null
+From aebfa52a925d701114afd6af0def35bab16d4f47 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 22 Mar 2018 11:08:50 +0100
+Subject: netfilter: drop template ct when conntrack is skipped.
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit aebfa52a925d701114afd6af0def35bab16d4f47 upstream.
+
+The ipv4 nf_ct code currently skips the nf_conntrack_in() call
+for fragmented packets. As a result, later matches/targets can end
+up manipulating the template ct entry instead of a 'real' one.
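+
+For instance, a raw-table CT rule attaches a template conntrack entry
+to the skb before conntrack runs (illustrative rule, not taken from
+the syzbot report):
+
+  iptables -t raw -A OUTPUT -p udp -j CT --zone 1
+
+With conntrack skipped for a fragment, a later match such as
+xt_cluster then sees that template instead of a real connection.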
+
+Exploiting the above, syzbot found a way to trigger the following
+splat:
+
+WARNING: CPU: 1 PID: 4242 at net/netfilter/xt_cluster.c:55
+xt_cluster_mt+0x6c1/0x840 net/netfilter/xt_cluster.c:127
+Kernel panic - not syncing: panic_on_warn set ...
+
+CPU: 1 PID: 4242 Comm: syzkaller027971 Not tainted 4.16.0-rc2+ #243
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:17 [inline]
+ dump_stack+0x194/0x24d lib/dump_stack.c:53
+ panic+0x1e4/0x41c kernel/panic.c:183
+ __warn+0x1dc/0x200 kernel/panic.c:547
+ report_bug+0x211/0x2d0 lib/bug.c:184
+ fixup_bug.part.11+0x37/0x80 arch/x86/kernel/traps.c:178
+ fixup_bug arch/x86/kernel/traps.c:247 [inline]
+ do_error_trap+0x2d7/0x3e0 arch/x86/kernel/traps.c:296
+ do_invalid_op+0x1b/0x20 arch/x86/kernel/traps.c:315
+ invalid_op+0x58/0x80 arch/x86/entry/entry_64.S:957
+RIP: 0010:xt_cluster_hash net/netfilter/xt_cluster.c:55 [inline]
+RIP: 0010:xt_cluster_mt+0x6c1/0x840 net/netfilter/xt_cluster.c:127
+RSP: 0018:ffff8801d2f6f2d0 EFLAGS: 00010293
+RAX: ffff8801af700540 RBX: 0000000000000000 RCX: ffffffff84a2d1e1
+RDX: 0000000000000000 RSI: ffff8801d2f6f478 RDI: ffff8801cafd336a
+RBP: ffff8801d2f6f2e8 R08: 0000000000000000 R09: 0000000000000001
+R10: 0000000000000000 R11: 0000000000000000 R12: ffff8801b03b3d18
+R13: ffff8801cafd3300 R14: dffffc0000000000 R15: ffff8801d2f6f478
+ ipt_do_table+0xa91/0x19b0 net/ipv4/netfilter/ip_tables.c:296
+ iptable_filter_hook+0x65/0x80 net/ipv4/netfilter/iptable_filter.c:41
+ nf_hook_entry_hookfn include/linux/netfilter.h:120 [inline]
+ nf_hook_slow+0xba/0x1a0 net/netfilter/core.c:483
+ nf_hook include/linux/netfilter.h:243 [inline]
+ NF_HOOK include/linux/netfilter.h:286 [inline]
+ raw_send_hdrinc.isra.17+0xf39/0x1880 net/ipv4/raw.c:432
+ raw_sendmsg+0x14cd/0x26b0 net/ipv4/raw.c:669
+ inet_sendmsg+0x11f/0x5e0 net/ipv4/af_inet.c:763
+ sock_sendmsg_nosec net/socket.c:629 [inline]
+ sock_sendmsg+0xca/0x110 net/socket.c:639
+ SYSC_sendto+0x361/0x5c0 net/socket.c:1748
+ SyS_sendto+0x40/0x50 net/socket.c:1716
+ do_syscall_64+0x280/0x940 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x42/0xb7
+RIP: 0033:0x441b49
+RSP: 002b:00007ffff5ca8b18 EFLAGS: 00000216 ORIG_RAX: 000000000000002c
+RAX: ffffffffffffffda RBX: 00000000004002c8 RCX: 0000000000441b49
+RDX: 0000000000000030 RSI: 0000000020ff7000 RDI: 0000000000000003
+RBP: 00000000006cc018 R08: 000000002066354c R09: 0000000000000010
+R10: 0000000000000000 R11: 0000000000000216 R12: 0000000000403470
+R13: 0000000000403500 R14: 0000000000000000 R15: 0000000000000000
+Dumping ftrace buffer:
+ (ftrace buffer empty)
+Kernel Offset: disabled
+Rebooting in 86400 seconds..
+
+Instead of adding checks for template ct on every target/match
+manipulating skb->_nfct, simply drop the template ct when skipping
+nf_conntrack_in().
+
+Fixes: 7b4fdf77a450ec ("netfilter: don't track fragmented packets")
+Reported-and-tested-by: syzbot+0346441ae0545cfcea3a@syzkaller.appspotmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+@@ -159,8 +159,20 @@ static unsigned int ipv4_conntrack_local
+ ip_hdrlen(skb) < sizeof(struct iphdr))
+ return NF_ACCEPT;
+
+- if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
++ if (ip_is_fragment(ip_hdr(skb))) { /* IP_NODEFRAG setsockopt set */
++ enum ip_conntrack_info ctinfo;
++ struct nf_conn *tmpl;
++
++ tmpl = nf_ct_get(skb, &ctinfo);
++ if (tmpl && nf_ct_is_template(tmpl)) {
++ /* when skipping ct, clear templates to avoid fooling
++ * later targets/matches
++ */
++ skb->_nfct = 0;
++ nf_ct_put(tmpl);
++ }
+ return NF_ACCEPT;
++ }
+
+ return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
+ }
--- /dev/null
+From b1d0a5d0cba4597c0394997b2d5fced3e3841b4e Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Sat, 10 Mar 2018 01:15:45 +0100
+Subject: netfilter: x_tables: add and use xt_check_proc_name
+
+From: Florian Westphal <fw@strlen.de>
+
+commit b1d0a5d0cba4597c0394997b2d5fced3e3841b4e upstream.
+
+recent and hashlimit both create /proc files, but only check that
+name is 0 terminated.
+
+This can trigger WARN() from procfs when name is "" or "/".
+Add helper for this and then use it for both.
+
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Reported-by: <syzbot+0502b00edac2a0680b61@syzkaller.appspotmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/netfilter/x_tables.h | 2 ++
+ net/netfilter/x_tables.c | 30 ++++++++++++++++++++++++++++++
+ net/netfilter/xt_hashlimit.c | 16 ++++++++++------
+ net/netfilter/xt_recent.c | 6 +++---
+ 4 files changed, 45 insertions(+), 9 deletions(-)
+
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -285,6 +285,8 @@ unsigned int *xt_alloc_entry_offsets(uns
+ bool xt_find_jump_offset(const unsigned int *offsets,
+ unsigned int target, unsigned int size);
+
++int xt_check_proc_name(const char *name, unsigned int size);
++
+ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ bool inv_proto);
+ int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -423,6 +423,36 @@ textify_hooks(char *buf, size_t size, un
+ return buf;
+ }
+
++/**
++ * xt_check_proc_name - check that name is suitable for /proc file creation
++ *
++ * @name: file name candidate
++ * @size: length of buffer
++ *
++ * some x_tables modules wish to create a file in /proc.
++ * This function makes sure that the name is suitable for this
++ * purpose, it checks that name is NUL terminated and isn't a 'special'
++ * name, like "..".
++ *
++ * returns negative number on error or 0 if name is useable.
++ */
++int xt_check_proc_name(const char *name, unsigned int size)
++{
++ if (name[0] == '\0')
++ return -EINVAL;
++
++ if (strnlen(name, size) == size)
++ return -ENAMETOOLONG;
++
++ if (strcmp(name, ".") == 0 ||
++ strcmp(name, "..") == 0 ||
++ strchr(name, '/'))
++ return -EINVAL;
++
++ return 0;
++}
++EXPORT_SYMBOL(xt_check_proc_name);
++
+ int xt_check_match(struct xt_mtchk_param *par,
+ unsigned int size, u_int8_t proto, bool inv_proto)
+ {
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -915,8 +915,9 @@ static int hashlimit_mt_check_v1(const s
+ struct hashlimit_cfg3 cfg = {};
+ int ret;
+
+- if (info->name[sizeof(info->name) - 1] != '\0')
+- return -EINVAL;
++ ret = xt_check_proc_name(info->name, sizeof(info->name));
++ if (ret)
++ return ret;
+
+ ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
+
+@@ -933,8 +934,9 @@ static int hashlimit_mt_check_v2(const s
+ struct hashlimit_cfg3 cfg = {};
+ int ret;
+
+- if (info->name[sizeof(info->name) - 1] != '\0')
+- return -EINVAL;
++ ret = xt_check_proc_name(info->name, sizeof(info->name));
++ if (ret)
++ return ret;
+
+ ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
+
+@@ -948,9 +950,11 @@ static int hashlimit_mt_check_v2(const s
+ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
+ {
+ struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
++ int ret;
+
+- if (info->name[sizeof(info->name) - 1] != '\0')
+- return -EINVAL;
++ ret = xt_check_proc_name(info->name, sizeof(info->name));
++ if (ret)
++ return ret;
+
+ return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
+ info->name, 3);
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -361,9 +361,9 @@ static int recent_mt_check(const struct
+ info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
+ return -EINVAL;
+ }
+- if (info->name[0] == '\0' ||
+- strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
+- return -EINVAL;
++ ret = xt_check_proc_name(info->name, sizeof(info->name));
++ if (ret)
++ return ret;
+
+ if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
+ nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
--- /dev/null
+From 0537250fdc6c876ed4cbbe874c739aebef493ee2 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@kernel.org>
+Date: Tue, 30 Jan 2018 11:30:11 -0800
+Subject: netfilter: x_tables: make allocation less aggressive
+
+From: Michal Hocko <mhocko@kernel.org>
+
+commit 0537250fdc6c876ed4cbbe874c739aebef493ee2 upstream.
+
+syzbot has noticed that xt_alloc_table_info can allocate a lot of memory.
+This is an admin-only interface, but an admin in a namespace is
+sufficient as well. eacd86ca3b03 ("net/netfilter/x_tables.c: use
+kvmalloc() in xt_alloc_table_info()") has changed the opencoded
+kmalloc->vmalloc fallback into kvmalloc. It has dropped __GFP_NORETRY
+on the way because vmalloc has simply never fully supported the
+__GFP_NORETRY semantics. This is still the case because e.g. the page
+tables backing the vmalloc area are hardcoded GFP_KERNEL.
+
+Revert back to __GFP_NORETRY as a poor man's defence against excessively
+large allocation requests here. We will not rule out the OOM killer
+completely, but __GFP_NORETRY should at least stop the large request in
+most cases.
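+
+For context, a simplified sketch of what kvmalloc() does internally
+(paraphrased from the 4.14-era mm/util.c, details elided):
+
+  void *kvmalloc(size_t size, gfp_t flags)
+  {
+          void *p;
+
+          /* the kmalloc attempt fails fast on large requests */
+          p = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);
+          if (p || size <= PAGE_SIZE)
+                  return p;
+
+          /* the fallback passes flags through, but the page tables
+           * backing the vmalloc area are still allocated with a
+           * hardcoded GFP_KERNEL, so __GFP_NORETRY is only partially
+           * honoured on this path */
+          return __vmalloc(size, flags, PAGE_KERNEL);
+  }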
+
+[akpm@linux-foundation.org: coding-style fixes]
+Fixes: eacd86ca3b03 ("net/netfilter/x_tables.c: use kvmalloc() in xt_alloc_table_info()")
+Link: http://lkml.kernel.org/r/20180130140104.GE21609@dhcp22.suse.cz
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: David S. Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/x_tables.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -1008,7 +1008,12 @@ struct xt_table_info *xt_alloc_table_inf
+ if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
+ return NULL;
+
+- info = kvmalloc(sz, GFP_KERNEL);
++ /* __GFP_NORETRY is not fully supported by kvmalloc but it should
++ * work reasonably well if sz is too large and bail out rather
++ * than shoot all processes down before realizing there is nothing
++ * more to reclaim.
++ */
++ info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
+ if (!info)
+ return NULL;
+
--- /dev/null
+From 47504ee04b9241548ae2c28be7d0b01cff3b7aa6 Mon Sep 17 00:00:00 2001
+From: Dennis Zhou <dennisszhou@gmail.com>
+Date: Fri, 16 Feb 2018 12:07:19 -0600
+Subject: percpu: add __GFP_NORETRY semantics to the percpu balancing path
+
+From: Dennis Zhou <dennisszhou@gmail.com>
+
+commit 47504ee04b9241548ae2c28be7d0b01cff3b7aa6 upstream.
+
+Percpu memory using the vmalloc area based chunk allocator lazily
+populates chunks by first requesting the full virtual address space
+required for the chunk and subsequently adding pages as allocations come
+through. To ensure atomic allocations can succeed, a workqueue item is
+used to maintain a minimum number of empty pages. In certain scenarios,
+such as the one reported in [1], it is possible that physical memory
+becomes quite scarce, which can result in either a rather long time
+spent trying to find free pages or, worse, a kernel panic.
+
+This patch adds support for __GFP_NORETRY and __GFP_NOWARN, passing them
+through to the underlying allocators. This should prevent any
+unnecessary panics potentially caused by the workqueue item. The gfp
+flags are passed around as additional flags rather than as a full set
+of flags. The next patch will change these to caller-passed semantics.
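+
+The resulting convention, sketched with names from the hunks below:
+
+  /* balance path: opportunistic, must not trigger the OOM killer */
+  const gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+  pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
+
+  /* allocation path: no extra flags, behaviour unchanged */
+  pcpu_populate_chunk(chunk, rs, re, 0);
+
+  /* the helpers OR in their own base flags, e.g. pcpu_mem_zalloc()
+   * ends up doing kzalloc(size, gfp | GFP_KERNEL) */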
+
+V2:
+Added const modifier to gfp flags in the balance path.
+Removed an extra whitespace.
+
+[1] https://lkml.org/lkml/2018/2/12/551
+
+Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
+Suggested-by: Daniel Borkmann <daniel@iogearbox.net>
+Reported-by: syzbot+adb03f3f0bb57ce3acda@syzkaller.appspotmail.com
+Acked-by: Christoph Lameter <cl@linux.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/percpu-km.c | 8 ++++----
+ mm/percpu-vm.c | 18 +++++++++++-------
+ mm/percpu.c | 45 ++++++++++++++++++++++++++++-----------------
+ 3 files changed, 43 insertions(+), 28 deletions(-)
+
+--- a/mm/percpu-km.c
++++ b/mm/percpu-km.c
+@@ -34,7 +34,7 @@
+ #include <linux/log2.h>
+
+ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
+- int page_start, int page_end)
++ int page_start, int page_end, gfp_t gfp)
+ {
+ return 0;
+ }
+@@ -45,18 +45,18 @@ static void pcpu_depopulate_chunk(struct
+ /* nada */
+ }
+
+-static struct pcpu_chunk *pcpu_create_chunk(void)
++static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
+ {
+ const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
+ struct pcpu_chunk *chunk;
+ struct page *pages;
+ int i;
+
+- chunk = pcpu_alloc_chunk();
++ chunk = pcpu_alloc_chunk(gfp);
+ if (!chunk)
+ return NULL;
+
+- pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
++ pages = alloc_pages(gfp | GFP_KERNEL, order_base_2(nr_pages));
+ if (!pages) {
+ pcpu_free_chunk(chunk);
+ return NULL;
+--- a/mm/percpu-vm.c
++++ b/mm/percpu-vm.c
+@@ -37,7 +37,7 @@ static struct page **pcpu_get_pages(void
+ lockdep_assert_held(&pcpu_alloc_mutex);
+
+ if (!pages)
+- pages = pcpu_mem_zalloc(pages_size);
++ pages = pcpu_mem_zalloc(pages_size, 0);
+ return pages;
+ }
+
+@@ -73,18 +73,21 @@ static void pcpu_free_pages(struct pcpu_
+ * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
+ * @page_start: page index of the first page to be allocated
+ * @page_end: page index of the last page to be allocated + 1
++ * @gfp: allocation flags passed to the underlying allocator
+ *
+ * Allocate pages [@page_start,@page_end) into @pages for all units.
+ * The allocation is for @chunk. Percpu core doesn't care about the
+ * content of @pages and will pass it verbatim to pcpu_map_pages().
+ */
+ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
+- struct page **pages, int page_start, int page_end)
++ struct page **pages, int page_start, int page_end,
++ gfp_t gfp)
+ {
+- const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+ unsigned int cpu, tcpu;
+ int i;
+
++ gfp |= GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
++
+ for_each_possible_cpu(cpu) {
+ for (i = page_start; i < page_end; i++) {
+ struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
+@@ -262,6 +265,7 @@ static void pcpu_post_map_flush(struct p
+ * @chunk: chunk of interest
+ * @page_start: the start page
+ * @page_end: the end page
++ * @gfp: allocation flags passed to the underlying memory allocator
+ *
+ * For each cpu, populate and map pages [@page_start,@page_end) into
+ * @chunk.
+@@ -270,7 +274,7 @@ static void pcpu_post_map_flush(struct p
+ * pcpu_alloc_mutex, does GFP_KERNEL allocation.
+ */
+ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
+- int page_start, int page_end)
++ int page_start, int page_end, gfp_t gfp)
+ {
+ struct page **pages;
+
+@@ -278,7 +282,7 @@ static int pcpu_populate_chunk(struct pc
+ if (!pages)
+ return -ENOMEM;
+
+- if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
++ if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
+ return -ENOMEM;
+
+ if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
+@@ -325,12 +329,12 @@ static void pcpu_depopulate_chunk(struct
+ pcpu_free_pages(chunk, pages, page_start, page_end);
+ }
+
+-static struct pcpu_chunk *pcpu_create_chunk(void)
++static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
+ {
+ struct pcpu_chunk *chunk;
+ struct vm_struct **vms;
+
+- chunk = pcpu_alloc_chunk();
++ chunk = pcpu_alloc_chunk(gfp);
+ if (!chunk)
+ return NULL;
+
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -447,10 +447,12 @@ static void pcpu_next_fit_region(struct
+ /**
+ * pcpu_mem_zalloc - allocate memory
+ * @size: bytes to allocate
++ * @gfp: allocation flags
+ *
+ * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
+- * kzalloc() is used; otherwise, vzalloc() is used. The returned
+- * memory is always zeroed.
++ * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
++ * This is to facilitate passing through whitelisted flags. The
++ * returned memory is always zeroed.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+@@ -458,15 +460,16 @@ static void pcpu_next_fit_region(struct
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+-static void *pcpu_mem_zalloc(size_t size)
++static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
+ {
+ if (WARN_ON_ONCE(!slab_is_available()))
+ return NULL;
+
+ if (size <= PAGE_SIZE)
+- return kzalloc(size, GFP_KERNEL);
++ return kzalloc(size, gfp | GFP_KERNEL);
+ else
+- return vzalloc(size);
++ return __vmalloc(size, gfp | GFP_KERNEL | __GFP_ZERO,
++ PAGE_KERNEL);
+ }
+
+ /**
+@@ -1154,12 +1157,12 @@ static struct pcpu_chunk * __init pcpu_a
+ return chunk;
+ }
+
+-static struct pcpu_chunk *pcpu_alloc_chunk(void)
++static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
+ {
+ struct pcpu_chunk *chunk;
+ int region_bits;
+
+- chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
++ chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
+ if (!chunk)
+ return NULL;
+
+@@ -1168,17 +1171,17 @@ static struct pcpu_chunk *pcpu_alloc_chu
+ region_bits = pcpu_chunk_map_bits(chunk);
+
+ chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
+- sizeof(chunk->alloc_map[0]));
++ sizeof(chunk->alloc_map[0]), gfp);
+ if (!chunk->alloc_map)
+ goto alloc_map_fail;
+
+ chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
+- sizeof(chunk->bound_map[0]));
++ sizeof(chunk->bound_map[0]), gfp);
+ if (!chunk->bound_map)
+ goto bound_map_fail;
+
+ chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
+- sizeof(chunk->md_blocks[0]));
++ sizeof(chunk->md_blocks[0]), gfp);
+ if (!chunk->md_blocks)
+ goto md_blocks_fail;
+
+@@ -1277,9 +1280,10 @@ static void pcpu_chunk_depopulated(struc
+ * pcpu_addr_to_page - translate address to physical address
+ * pcpu_verify_alloc_info - check alloc_info is acceptable during init
+ */
+-static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
++static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size,
++ gfp_t gfp);
+ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
+-static struct pcpu_chunk *pcpu_create_chunk(void);
++static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
+ static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
+ static struct page *pcpu_addr_to_page(void *addr);
+ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
+@@ -1421,7 +1425,7 @@ restart:
+ }
+
+ if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+- chunk = pcpu_create_chunk();
++ chunk = pcpu_create_chunk(0);
+ if (!chunk) {
+ err = "failed to allocate new chunk";
+ goto fail;
+@@ -1450,7 +1454,7 @@ area_found:
+ page_start, page_end) {
+ WARN_ON(chunk->immutable);
+
+- ret = pcpu_populate_chunk(chunk, rs, re);
++ ret = pcpu_populate_chunk(chunk, rs, re, 0);
+
+ spin_lock_irqsave(&pcpu_lock, flags);
+ if (ret) {
+@@ -1561,10 +1565,17 @@ void __percpu *__alloc_reserved_percpu(s
+ * pcpu_balance_workfn - manage the amount of free chunks and populated pages
+ * @work: unused
+ *
+- * Reclaim all fully free chunks except for the first one.
++ * Reclaim all fully free chunks except for the first one. This is also
++ * responsible for maintaining the pool of empty populated pages. However,
++ * it is possible that this is called when physical memory is scarce causing
++ * OOM killer to be triggered. We should avoid doing so until an actual
++ * allocation causes the failure as it is possible that requests can be
++ * serviced from already backed regions.
+ */
+ static void pcpu_balance_workfn(struct work_struct *work)
+ {
++ /* gfp flags passed to underlying allocators */
++ const gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;
+ LIST_HEAD(to_free);
+ struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
+ struct pcpu_chunk *chunk, *next;
+@@ -1645,7 +1656,7 @@ retry_pop:
+ chunk->nr_pages) {
+ int nr = min(re - rs, nr_to_pop);
+
+- ret = pcpu_populate_chunk(chunk, rs, rs + nr);
++ ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
+ if (!ret) {
+ nr_to_pop -= nr;
+ spin_lock_irq(&pcpu_lock);
+@@ -1662,7 +1673,7 @@ retry_pop:
+
+ if (nr_to_pop) {
+ /* ran out of chunks to populate, create a new one and retry */
+- chunk = pcpu_create_chunk();
++ chunk = pcpu_create_chunk(gfp);
+ if (chunk) {
+ spin_lock_irq(&pcpu_lock);
+ pcpu_chunk_relocate(chunk, -1);
--- /dev/null
+From 59fba0869acae06ff594dd7e9808ed673f53538a Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 10 Jan 2018 17:35:43 +0100
+Subject: phy: qcom-ufs: add MODULE_LICENSE tag
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 59fba0869acae06ff594dd7e9808ed673f53538a upstream.
+
+While the specific UFS PHY drivers (14nm and 20nm) have a module
+license, the common base module does not, leading to a Kbuild
+failure:
+
+WARNING: modpost: missing MODULE_LICENSE() in drivers/phy/qualcomm/phy-qcom-ufs.o
+FATAL: modpost: GPL-incompatible module phy-qcom-ufs.ko uses GPL-only symbol 'clk_enable'
+
+This adds a module description and license tag to fix the build.
+I added both Yaniv and Vivek as authors here, as Yaniv sent the initial
+submission, while Vivek did most of the work since.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/phy/qualcomm/phy-qcom-ufs.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
++++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
+@@ -689,3 +689,8 @@ int ufs_qcom_phy_power_off(struct phy *g
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
++
++MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>");
++MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
++MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
++MODULE_LICENSE("GPL v2");
rdma-ucma-check-that-device-is-connected-prior-to-access-it.patch
rdma-ucma-check-that-device-exists-prior-to-accessing-it.patch
rdma-ucma-introduce-safer-rdma_addr_size-variants.patch
+net-xfrm-use-preempt-safe-this_cpu_read-in-ipcomp_alloc_tfms.patch
+xfrm-refuse-to-insert-32-bit-userspace-socket-policies-on-64-bit-systems.patch
+percpu-add-__gfp_noretry-semantics-to-the-percpu-balancing-path.patch
+netfilter-x_tables-make-allocation-less-aggressive.patch
+netfilter-bridge-ebt_among-add-more-missing-match-size-checks.patch
+l2tp-fix-races-with-ipv4-mapped-ipv6-addresses.patch
+netfilter-drop-template-ct-when-conntrack-is-skipped.patch
+netfilter-x_tables-add-and-use-xt_check_proc_name.patch
+phy-qcom-ufs-add-module_license-tag.patch
+bluetooth-fix-missing-encryption-refresh-on-security-request.patch
--- /dev/null
+From 19d7df69fdb2636856dc8919de72fc1bf8f79598 Mon Sep 17 00:00:00 2001
+From: Steffen Klassert <steffen.klassert@secunet.com>
+Date: Thu, 1 Feb 2018 08:49:23 +0100
+Subject: xfrm: Refuse to insert 32 bit userspace socket policies on 64 bit systems
+
+From: Steffen Klassert <steffen.klassert@secunet.com>
+
+commit 19d7df69fdb2636856dc8919de72fc1bf8f79598 upstream.
+
+We don't have a compat layer for xfrm, so userspace and kernel
+structures have different sizes in this case. This results in
+a broken configuration, so refuse to configure socket policies
+when the insert comes from 32-bit userspace, as we already do
+with policies inserted via netlink.
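+
+A small illustration of the size mismatch (hypothetical struct, not
+the real xfrm layout):
+
+  #include <stdio.h>
+
+  struct policy_hdr {
+          int  index;
+          long lifetime;   /* 4 bytes on ILP32, 8 bytes on LP64 */
+  };
+
+  int main(void)
+  {
+          /* prints 8 when built with -m32 but 16 on x86-64, so a
+           * 32-bit userspace buffer fails the kernel's size check */
+          printf("%zu\n", sizeof(struct policy_hdr));
+          return 0;
+  }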
+
+Reported-and-tested-by: syzbot+e1a1577ca8bcb47b769a@syzkaller.appspotmail.com
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_state.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2050,6 +2050,11 @@ int xfrm_user_policy(struct sock *sk, in
+ struct xfrm_mgr *km;
+ struct xfrm_policy *pol = NULL;
+
++#ifdef CONFIG_COMPAT
++ if (in_compat_syscall())
++ return -EOPNOTSUPP;
++#endif
++
+ if (!optval && !optlen) {
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+ xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);