--- /dev/null
+From 252f6e8eae909bc075a1b1e3b9efb095ae4c0b56 Mon Sep 17 00:00:00 2001
+From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Date: Wed, 16 Jan 2019 14:29:50 +0300
+Subject: ARCv2: Enable unaligned access in early ASM code
+
+From: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+
+commit 252f6e8eae909bc075a1b1e3b9efb095ae4c0b56 upstream.
+
+It is currently done in arc_init_IRQ() which might be too late
+considering gcc 7.3.1 onwards (GNU 2018.03) generates unaligned
+memory accesses by default
+
+Cc: stable@vger.kernel.org #4.4+
+Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+[vgupta: rewrote changelog]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/kernel/head.S | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -17,6 +17,7 @@
+ #include <asm/entry.h>
+ #include <asm/arcregs.h>
+ #include <asm/cache.h>
++#include <asm/irqflags.h>
+
+ .macro CPU_EARLY_SETUP
+
+@@ -47,6 +48,15 @@
+ sr r5, [ARC_REG_DC_CTRL]
+
+ 1:
++
++#ifdef CONFIG_ISA_ARCV2
++ ; Unaligned access is disabled at reset, so re-enable early as
++ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
++ ; by default
++ lr r5, [status32]
++ bset r5, r5, STATUS_AD_BIT
++ kflag r5
++#endif
+ .endm
+
+ .section .init.text, "ax",@progbits
--- /dev/null
+From foo@baz Sun Feb 24 14:41:29 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 11 Feb 2019 14:41:22 -0800
+Subject: batman-adv: fix uninit-value in batadv_interface_tx()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 4ffcbfac60642f63ae3d80891f573ba7e94a265c ]
+
+KMSAN reported batadv_interface_tx() was possibly using a
+garbage value [1]
+
+batadv_get_vid() does have a pskb_may_pull() call
+but batadv_interface_tx() does not actually make sure
+this did not fail.
+
+[1]
+BUG: KMSAN: uninit-value in batadv_interface_tx+0x908/0x1e40 net/batman-adv/soft-interface.c:231
+CPU: 0 PID: 10006 Comm: syz-executor469 Not tainted 4.20.0-rc7+ #5
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x173/0x1d0 lib/dump_stack.c:113
+ kmsan_report+0x12e/0x2a0 mm/kmsan/kmsan.c:613
+ __msan_warning+0x82/0xf0 mm/kmsan/kmsan_instr.c:313
+ batadv_interface_tx+0x908/0x1e40 net/batman-adv/soft-interface.c:231
+ __netdev_start_xmit include/linux/netdevice.h:4356 [inline]
+ netdev_start_xmit include/linux/netdevice.h:4365 [inline]
+ xmit_one net/core/dev.c:3257 [inline]
+ dev_hard_start_xmit+0x607/0xc40 net/core/dev.c:3273
+ __dev_queue_xmit+0x2e42/0x3bc0 net/core/dev.c:3843
+ dev_queue_xmit+0x4b/0x60 net/core/dev.c:3876
+ packet_snd net/packet/af_packet.c:2928 [inline]
+ packet_sendmsg+0x8306/0x8f30 net/packet/af_packet.c:2953
+ sock_sendmsg_nosec net/socket.c:621 [inline]
+ sock_sendmsg net/socket.c:631 [inline]
+ __sys_sendto+0x8c4/0xac0 net/socket.c:1788
+ __do_sys_sendto net/socket.c:1800 [inline]
+ __se_sys_sendto+0x107/0x130 net/socket.c:1796
+ __x64_sys_sendto+0x6e/0x90 net/socket.c:1796
+ do_syscall_64+0xbc/0xf0 arch/x86/entry/common.c:291
+ entry_SYSCALL_64_after_hwframe+0x63/0xe7
+RIP: 0033:0x441889
+Code: 18 89 d0 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 bb 10 fc ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007ffdda6fd468 EFLAGS: 00000216 ORIG_RAX: 000000000000002c
+RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 0000000000441889
+RDX: 000000000000000e RSI: 00000000200000c0 RDI: 0000000000000003
+RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000216 R12: 00007ffdda6fd4c0
+R13: 00007ffdda6fd4b0 R14: 0000000000000000 R15: 0000000000000000
+
+Uninit was created at:
+ kmsan_save_stack_with_flags mm/kmsan/kmsan.c:204 [inline]
+ kmsan_internal_poison_shadow+0x92/0x150 mm/kmsan/kmsan.c:158
+ kmsan_kmalloc+0xa6/0x130 mm/kmsan/kmsan_hooks.c:176
+ kmsan_slab_alloc+0xe/0x10 mm/kmsan/kmsan_hooks.c:185
+ slab_post_alloc_hook mm/slab.h:446 [inline]
+ slab_alloc_node mm/slub.c:2759 [inline]
+ __kmalloc_node_track_caller+0xe18/0x1030 mm/slub.c:4383
+ __kmalloc_reserve net/core/skbuff.c:137 [inline]
+ __alloc_skb+0x309/0xa20 net/core/skbuff.c:205
+ alloc_skb include/linux/skbuff.h:998 [inline]
+ alloc_skb_with_frags+0x1c7/0xac0 net/core/skbuff.c:5220
+ sock_alloc_send_pskb+0xafd/0x10e0 net/core/sock.c:2083
+ packet_alloc_skb net/packet/af_packet.c:2781 [inline]
+ packet_snd net/packet/af_packet.c:2872 [inline]
+ packet_sendmsg+0x661a/0x8f30 net/packet/af_packet.c:2953
+ sock_sendmsg_nosec net/socket.c:621 [inline]
+ sock_sendmsg net/socket.c:631 [inline]
+ __sys_sendto+0x8c4/0xac0 net/socket.c:1788
+ __do_sys_sendto net/socket.c:1800 [inline]
+ __se_sys_sendto+0x107/0x130 net/socket.c:1796
+ __x64_sys_sendto+0x6e/0x90 net/socket.c:1796
+ do_syscall_64+0xbc/0xf0 arch/x86/entry/common.c:291
+ entry_SYSCALL_64_after_hwframe+0x63/0xe7
+
+Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Marek Lindner <mareklindner@neomailbox.ch>
+Cc: Simon Wunderlich <sw@simonwunderlich.de>
+Cc: Antonio Quartulli <a@unstable.cc>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/soft-interface.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -215,6 +215,8 @@ static int batadv_interface_tx(struct sk
+
+ switch (ntohs(ethhdr->h_proto)) {
+ case ETH_P_8021Q:
++ if (!pskb_may_pull(skb, sizeof(*vhdr)))
++ goto dropped;
+ vhdr = vlan_eth_hdr(skb);
+
+ if (vhdr->h_vlan_encapsulated_proto != ethertype) {
--- /dev/null
+From foo@baz Sun Feb 24 08:42:25 CET 2019
+From: Saeed Mahameed <saeedm@mellanox.com>
+Date: Mon, 11 Feb 2019 18:04:17 +0200
+Subject: net/mlx4_en: Force CHECKSUM_NONE for short ethernet frames
+
+From: Saeed Mahameed <saeedm@mellanox.com>
+
+[ Upstream commit 29dded89e80e3fff61efb34f07a8a3fba3ea146d ]
+
+When an ethernet frame is padded to meet the minimum ethernet frame
+size, the padding octets are not covered by the hardware checksum.
+Fortunately the padding octets are usually zero's, which don't affect
+checksum. However, it is not guaranteed. For example, switches might
+choose to make other use of these octets.
+This repeatedly causes kernel hardware checksum fault.
+
+Prior to the cited commit below, skb checksum was forced to be
+CHECKSUM_NONE when padding is detected. After it, we need to keep
+skb->csum updated. However, fixing up CHECKSUM_COMPLETE requires to
+verify and parse IP headers, it is not worth the effort as the packets
+are so small that CHECKSUM_COMPLETE has no significant advantage.
+
+Future work: when reporting checksum complete is not an option for
+IP non-TCP/UDP packets, we can actually fallback to report checksum
+unnecessary, by looking at cqe IPOK bit.
+
+Fixes: 88078d98d1bb ("net: pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends")
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -725,13 +725,27 @@ static int get_fixed_ipv6_csum(__wsum hw
+ return 0;
+ }
+ #endif
++
++#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
++
+ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+ netdev_features_t dev_features)
+ {
+ __wsum hw_checksum = 0;
++ void *hdr;
+
+- void *hdr = (u8 *)va + sizeof(struct ethhdr);
++ /* CQE csum doesn't cover padding octets in short ethernet
++ * frames. And the pad field is appended prior to calculating
++ * and appending the FCS field.
++ *
++ * Detecting these padded frames requires to verify and parse
++ * IP headers, so we simply force all those small frames to skip
++ * checksum complete.
++ */
++ if (short_frame(skb->len))
++ return -EINVAL;
+
++ hdr = (u8 *)va + sizeof(struct ethhdr);
+ hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
+
+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
+@@ -851,6 +865,11 @@ int mlx4_en_process_rx_cq(struct net_dev
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+
+ if (likely(dev->features & NETIF_F_RXCSUM)) {
++ /* TODO: For IP non TCP/UDP packets when csum complete is
++ * not an option (not supported or any other reason) we can
++ * actually check cqe IPOK status bit and report
++ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
++ */
+ if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP)) {
+ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
--- /dev/null
+From foo@baz Sun Feb 24 14:41:29 CET 2019
+From: Kal Conley <kal.conley@dectris.com>
+Date: Sun, 10 Feb 2019 09:57:11 +0100
+Subject: net/packet: fix 4gb buffer limit due to overflow check
+
+From: Kal Conley <kal.conley@dectris.com>
+
+[ Upstream commit fc62814d690cf62189854464f4bd07457d5e9e50 ]
+
+When calculating rb->frames_per_block * req->tp_block_nr the result
+can overflow. Check it for overflow without limiting the total buffer
+size to UINT_MAX.
+
+This change fixes support for packet ring buffers >= UINT_MAX.
+
+Fixes: 8f8d28e4d6d8 ("net/packet: fix overflow in check for tp_frame_nr")
+Signed-off-by: Kal Conley <kal.conley@dectris.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4217,7 +4217,7 @@ static int packet_set_ring(struct sock *
+ rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
+ if (unlikely(rb->frames_per_block == 0))
+ goto out;
+- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
++ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
+ goto out;
+ if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
+ req->tp_frame_nr))
rdma-srp-rework-scsi-device-reset-handling.patch
keys-user-align-the-payload-buffer.patch
keys-always-initialize-keyring_index_key-desc_len.patch
+batman-adv-fix-uninit-value-in-batadv_interface_tx.patch
+net-packet-fix-4gb-buffer-limit-due-to-overflow-check.patch
+team-avoid-complex-list-operations-in-team_nl_cmd_options_set.patch
+sit-check-if-ipv6-enabled-before-calling-ip6_err_gen_icmpv6_unreach.patch
+net-mlx4_en-force-checksum_none-for-short-ethernet-frames.patch
+arcv2-enable-unaligned-access-in-early-asm-code.patch
--- /dev/null
+From foo@baz Sun Feb 24 14:41:29 CET 2019
+From: Hangbin Liu <liuhangbin@gmail.com>
+Date: Thu, 7 Feb 2019 18:36:11 +0800
+Subject: sit: check if IPv6 enabled before calling ip6_err_gen_icmpv6_unreach()
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 173656accaf583698bac3f9e269884ba60d51ef4 ]
+
+If we disabled IPv6 from the kernel command line (ipv6.disable=1), we should
+not call ip6_err_gen_icmpv6_unreach(). This:
+
+ ip link add sit1 type sit local 192.0.2.1 remote 192.0.2.2 ttl 1
+ ip link set sit1 up
+ ip addr add 198.51.100.1/24 dev sit1
+ ping 198.51.100.2
+
+if IPv6 is disabled at boot time, will crash the kernel.
+
+v2: there's no need to use in6_dev_get(), use __in6_dev_get() instead,
+ as we only need to check that idev exists and we are under
+ rcu_read_lock() (from netif_receive_skb_internal()).
+
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Fixes: ca15a078bd90 ("sit: generate icmpv6 error when receiving icmpv4 error")
+Cc: Oussama Ghorbel <ghorbel@pivasoftware.com>
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/sit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -577,7 +577,7 @@ static int ipip6_err(struct sk_buff *skb
+ goto out;
+
+ err = 0;
+- if (!ipip6_err_gen_icmpv6_unreach(skb))
++ if (__in6_dev_get(skb->dev) && !ipip6_err_gen_icmpv6_unreach(skb))
+ goto out;
+
+ if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
--- /dev/null
+From foo@baz Sun Feb 24 14:41:29 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 11 Feb 2019 21:59:51 -0800
+Subject: team: avoid complex list operations in team_nl_cmd_options_set()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 2fdeee2549231b1f989f011bb18191f5660d3745 ]
+
+The current opt_inst_list operations inside team_nl_cmd_options_set()
+is too complex to track:
+
+ LIST_HEAD(opt_inst_list);
+ nla_for_each_nested(...) {
+ list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+ if (__team_option_inst_tmp_find(&opt_inst_list, opt_inst))
+ continue;
+ list_add(&opt_inst->tmp_list, &opt_inst_list);
+ }
+ }
+ team_nl_send_event_options_get(team, &opt_inst_list);
+
+as while we retrieve 'opt_inst' from team->option_inst_list, it could
+be added to the local 'opt_inst_list' for multiple times. The
+__team_option_inst_tmp_find() doesn't work, as the setter
+team_mode_option_set() still calls team->ops.exit() which uses
+->tmp_list too in __team_options_change_check().
+
+Simplify the list operations by moving the 'opt_inst_list' and
+team_nl_send_event_options_get() into the nla_for_each_nested() loop so
+that it can be guaranteed that we won't insert the same list entry for
+multiple times. Therefore, __team_option_inst_tmp_find() can be removed
+too.
+
+Fixes: 4fb0534fb7bb ("team: avoid adding twice the same option to the event list")
+Fixes: 2fcdb2c9e659 ("team: allow to send multiple set events in one message")
+Reported-by: syzbot+4d4af685432dc0e56c91@syzkaller.appspotmail.com
+Reported-by: syzbot+68ee510075cf64260cc4@syzkaller.appspotmail.com
+Cc: Jiri Pirko <jiri@resnulli.us>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Jiri Pirko <jiri@mellanox.com>
+Reviewed-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c | 27 +++++----------------------
+ 1 file changed, 5 insertions(+), 22 deletions(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -247,17 +247,6 @@ static void __team_option_inst_mark_remo
+ }
+ }
+
+-static bool __team_option_inst_tmp_find(const struct list_head *opts,
+- const struct team_option_inst *needle)
+-{
+- struct team_option_inst *opt_inst;
+-
+- list_for_each_entry(opt_inst, opts, tmp_list)
+- if (opt_inst == needle)
+- return true;
+- return false;
+-}
+-
+ static int __team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+@@ -2447,7 +2436,6 @@ static int team_nl_cmd_options_set(struc
+ int err = 0;
+ int i;
+ struct nlattr *nl_option;
+- LIST_HEAD(opt_inst_list);
+
+ team = team_nl_team_get(info);
+ if (!team)
+@@ -2463,6 +2451,7 @@ static int team_nl_cmd_options_set(struc
+ struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
+ struct nlattr *attr;
+ struct nlattr *attr_data;
++ LIST_HEAD(opt_inst_list);
+ enum team_option_type opt_type;
+ int opt_port_ifindex = 0; /* != 0 for per-port options */
+ u32 opt_array_index = 0;
+@@ -2566,23 +2555,17 @@ static int team_nl_cmd_options_set(struc
+ if (err)
+ goto team_put;
+ opt_inst->changed = true;
+-
+- /* dumb/evil user-space can send us duplicate opt,
+- * keep only the last one
+- */
+- if (__team_option_inst_tmp_find(&opt_inst_list,
+- opt_inst))
+- continue;
+-
+ list_add(&opt_inst->tmp_list, &opt_inst_list);
+ }
+ if (!opt_found) {
+ err = -ENOENT;
+ goto team_put;
+ }
+- }
+
+- err = team_nl_send_event_options_get(team, &opt_inst_list);
++ err = team_nl_send_event_options_get(team, &opt_inst_list);
++ if (err)
++ break;
++ }
+
+ team_put:
+ team_nl_team_put(team);