--- /dev/null
+From 955d3411a17f590364238bd0d3329b61f20c1cd2 Mon Sep 17 00:00:00 2001
+From: Sven Eckelmann <sven@narfation.org>
+Date: Sun, 30 Dec 2018 12:46:01 +0100
+Subject: batman-adv: Avoid WARN on net_device without parent in netns
+
+From: Sven Eckelmann <sven@narfation.org>
+
+commit 955d3411a17f590364238bd0d3329b61f20c1cd2 upstream.
+
+It is not allowed to use WARN* helpers on potential incorrect input from
+the user or transient problems because systems configured as panic_on_warn
+will reboot due to such a problem.
+
+A NULL return value of __dev_get_by_index can be caused by various problems
+which can either be related to the system configuration or problems
+(incorrectly returned network namespaces) in other (virtual) net_device
+drivers. batman-adv should not cause a (harmful) WARN in this situation and
+instead only report it via a simple message.
+
+Fixes: b7eddd0b3950 ("batman-adv: prevent using any virtual device created on batman-adv as hard-interface")
+Reported-by: syzbot+c764de0fcfadca9a8595@syzkaller.appspotmail.com
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/batman-adv/hard-interface.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -20,7 +20,6 @@
+ #include "main.h"
+
+ #include <linux/atomic.h>
+-#include <linux/bug.h>
+ #include <linux/byteorder/generic.h>
+ #include <linux/errno.h>
+ #include <linux/gfp.h>
+@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(co
+ parent_dev = __dev_get_by_index((struct net *)parent_net,
+ dev_get_iflink(net_dev));
+ /* if we got a NULL parent_dev there is something broken.. */
+- if (WARN(!parent_dev, "Cannot find parent device"))
++ if (!parent_dev) {
++ pr_err("Cannot find parent device\n");
+ return false;
++ }
+
+ if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
+ return false;
--- /dev/null
+From 9114daa825fc3f335f9bea3313ce667090187280 Mon Sep 17 00:00:00 2001
+From: Sven Eckelmann <sven@narfation.org>
+Date: Mon, 31 Dec 2018 22:31:01 +0100
+Subject: batman-adv: Force mac header to start of data on xmit
+
+From: Sven Eckelmann <sven@narfation.org>
+
+commit 9114daa825fc3f335f9bea3313ce667090187280 upstream.
+
+The caller of ndo_start_xmit may not already have called
+skb_reset_mac_header. The returned value of skb_mac_header/eth_hdr
+therefore can be in the wrong position and even outside the current skbuff.
+This for example happens when the user binds to the device using a
+PF_PACKET-SOCK_RAW with enabled qdisc-bypass:
+
+ int opt = 4;
+ setsockopt(sock, SOL_PACKET, PACKET_QDISC_BYPASS, &opt, sizeof(opt));
+
+Since eth_hdr is used all over the codebase, the batadv_interface_tx
+function must always take care of resetting it.
+
+Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol")
+Reported-by: syzbot+9d7405c7faa390e60b4e@syzkaller.appspotmail.com
+Reported-by: syzbot+7d20bc3f1ddddc0f9079@syzkaller.appspotmail.com
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/batman-adv/soft-interface.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(s
+
+ netif_trans_update(soft_iface);
+ vid = batadv_get_vid(skb, 0);
++
++ skb_reset_mac_header(skb);
+ ethhdr = eth_hdr(skb);
+
+ switch (ntohs(ethhdr->h_proto)) {
--- /dev/null
+From 4aac9228d16458cedcfd90c7fb37211cf3653ac3 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Mon, 14 Jan 2019 21:13:10 +0100
+Subject: libceph: avoid KEEPALIVE_PENDING races in ceph_con_keepalive()
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 4aac9228d16458cedcfd90c7fb37211cf3653ac3 upstream.
+
+con_fault() can transition the connection into STANDBY right after
+ceph_con_keepalive() clears STANDBY in clear_standby():
+
+ libceph user thread ceph-msgr worker
+
+ceph_con_keepalive()
+ mutex_lock(&con->mutex)
+ clear_standby(con)
+ mutex_unlock(&con->mutex)
+ mutex_lock(&con->mutex)
+ con_fault()
+ ...
+ if KEEPALIVE_PENDING isn't set
+ set state to STANDBY
+ ...
+ mutex_unlock(&con->mutex)
+ set KEEPALIVE_PENDING
+ set WRITE_PENDING
+
+This triggers warnings in clear_standby() when either ceph_con_send()
+or ceph_con_keepalive() get to clearing STANDBY next time.
+
+I don't see a reason to condition queue_con() call on the previous
+value of KEEPALIVE_PENDING, so move the setting of KEEPALIVE_PENDING
+into the critical section -- unlike WRITE_PENDING, KEEPALIVE_PENDING
+could have been a non-atomic flag.
+
+Reported-by: syzbot+acdeb633f6211ccdf886@syzkaller.appspotmail.com
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Tested-by: Myungho Jung <mhjungk@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ceph/messenger.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -3240,9 +3240,10 @@ void ceph_con_keepalive(struct ceph_conn
+ dout("con_keepalive %p\n", con);
+ mutex_lock(&con->mutex);
+ clear_standby(con);
++ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
+ mutex_unlock(&con->mutex);
+- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
+- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
++
++ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
+ queue_con(con);
+ }
+ EXPORT_SYMBOL(ceph_con_keepalive);
--- /dev/null
+From 8fdd60f2ae3682caf2a7258626abc21eb4711892 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 31 Jan 2019 23:41:11 -0500
+Subject: Revert "ext4: use ext4_write_inode() when fsyncing w/o a journal"
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 8fdd60f2ae3682caf2a7258626abc21eb4711892 upstream.
+
+This reverts commit ad211f3e94b314a910d4af03178a0b52a7d1ee0a.
+
+As Jan Kara pointed out, this change was unsafe since it means we lose
+the call to sync_mapping_buffers() in the nojournal case. The
+original point of the commit was avoid taking the inode mutex (since
+it causes a lockdep warning in generic/113); but we need the mutex in
+order to call sync_mapping_buffers().
+
+The real fix to this problem was discussed here:
+
+https://lore.kernel.org/lkml/20181025150540.259281-4-bvanassche@acm.org
+
+The proposed patch was to fix a syzbot complaint, but the problem can
+also be demonstrated via "kvm-xfstests -c nojournal generic/113".
+Multiple solutions were discussed in the e-mail thread, but none have
+landed in the kernel as of this writing. Anyway, commit
+ad211f3e94b314 is absolutely the wrong way to suppress the lockdep, so
+revert it.
+
+Fixes: ad211f3e94b314a910d4af03178a0b52a7d1ee0a ("ext4: use ext4_write_inode() when fsyncing w/o a journal")
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reported: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/fsync.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+--- a/fs/ext4/fsync.c
++++ b/fs/ext4/fsync.c
+@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, lo
+ goto out;
+ }
+
+- ret = file_write_and_wait_range(file, start, end);
+- if (ret)
+- return ret;
+-
+ if (!journal) {
+- struct writeback_control wbc = {
+- .sync_mode = WB_SYNC_ALL
+- };
+-
+- ret = ext4_write_inode(inode, &wbc);
++ ret = __generic_file_fsync(file, start, end, datasync);
+ if (!ret)
+ ret = ext4_sync_parent(inode);
+ if (test_opt(inode->i_sb, BARRIER))
+@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, lo
+ goto out;
+ }
+
++ ret = file_write_and_wait_range(file, start, end);
++ if (ret)
++ return ret;
+ /*
+ * data=writeback,ordered:
+ * The caller's filemap_fdatawrite()/wait will sync the data.
drm-vmwgfx-return-error-code-from-vmw_execbuf_copy_fence_user.patch
sunrpc-always-drop-the-xprt_lock-on-xprt_close_wait.patch
xfrm-make-set-mark-default-behavior-backward-compatible.patch
+revert-ext4-use-ext4_write_inode-when-fsyncing-w-o-a-journal.patch
+libceph-avoid-keepalive_pending-races-in-ceph_con_keepalive.patch
+xfrm-refine-validation-of-template-and-selector-families.patch
+batman-adv-avoid-warn-on-net_device-without-parent-in-netns.patch
+batman-adv-force-mac-header-to-start-of-data-on-xmit.patch
+svcrdma-reduce-max_send_sges.patch
+svcrdma-remove-max_sge-check-at-connect-time.patch
--- /dev/null
+From f3c1fd0ee294abd4367dfa72d89f016c682202f0 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Mon, 1 Oct 2018 14:15:56 -0400
+Subject: svcrdma: Reduce max_send_sges
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit f3c1fd0ee294abd4367dfa72d89f016c682202f0 upstream.
+
+There's no need to request a large number of send SGEs because the
+inline threshold already constrains the number of SGEs per Send.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Cc: Don Dutile <ddutile@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xprtrdma/svc_rdma_transport.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -475,10 +475,12 @@ static struct svc_xprt *svc_rdma_accept(
+
+ /* Qualify the transport resource defaults with the
+ * capabilities of this particular device */
+- newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
+- /* transport hdr, head iovec, one page list entry, tail iovec */
+- if (newxprt->sc_max_send_sges < 4) {
+- pr_err("svcrdma: too few Send SGEs available (%d)\n",
++ /* Transport header, head iovec, tail iovec */
++ newxprt->sc_max_send_sges = 3;
++ /* Add one SGE per page list entry */
++ newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
++ if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
++ pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
+ newxprt->sc_max_send_sges);
+ goto errout;
+ }
--- /dev/null
+From e248aa7be86e8179f20ac0931774ecd746f3f5bf Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Fri, 25 Jan 2019 16:54:54 -0500
+Subject: svcrdma: Remove max_sge check at connect time
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit e248aa7be86e8179f20ac0931774ecd746f3f5bf upstream.
+
+Two and a half years ago, the client was changed to use gathered
+Send for larger inline messages, in commit 655fec6987b ("xprtrdma:
+Use gathered Send for large inline messages"). Several fixes were
+required because there are a few in-kernel device drivers whose
+max_sge is 3, and these were broken by the change.
+
+Apparently my memory is going, because some time later, I submitted
+commit 25fd86eca11c ("svcrdma: Don't overrun the SGE array in
+svc_rdma_send_ctxt"), and after that, commit f3c1fd0ee294 ("svcrdma:
+Reduce max_send_sges"). These too incorrectly assumed in-kernel
+device drivers would have more than a few Send SGEs available.
+
+The fix for the server side is not the same. This is because the
+fundamental problem on the server is that, whether or not the client
+has provisioned a chunk for the RPC reply, the server must squeeze
+even the most complex RPC replies into a single RDMA Send. Failing
+in the send path because of Send SGE exhaustion should never be an
+option.
+
+Therefore, instead of failing when the send path runs out of SGEs,
+switch to using a bounce buffer mechanism to handle RPC replies that
+are too complex for the device to send directly. That allows us to
+remove the max_sge check to enable drivers with small max_sge to
+work again.
+
+Reported-by: Don Dutile <ddutile@redhat.com>
+Fixes: 25fd86eca11c ("svcrdma: Don't overrun the SGE array in ...")
+Cc: stable@vger.kernel.org
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 105 +++++++++++++++++++++++++++++--
+ net/sunrpc/xprtrdma/svc_rdma_transport.c | 9 --
+ 2 files changed, 102 insertions(+), 12 deletions(-)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -563,6 +563,99 @@ void svc_rdma_sync_reply_hdr(struct svcx
+ DMA_TO_DEVICE);
+ }
+
++/* If the xdr_buf has more elements than the device can
++ * transmit in a single RDMA Send, then the reply will
++ * have to be copied into a bounce buffer.
++ */
++static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
++ struct xdr_buf *xdr,
++ __be32 *wr_lst)
++{
++ int elements;
++
++ /* xdr->head */
++ elements = 1;
++
++ /* xdr->pages */
++ if (!wr_lst) {
++ unsigned int remaining;
++ unsigned long pageoff;
++
++ pageoff = xdr->page_base & ~PAGE_MASK;
++ remaining = xdr->page_len;
++ while (remaining) {
++ ++elements;
++ remaining -= min_t(u32, PAGE_SIZE - pageoff,
++ remaining);
++ pageoff = 0;
++ }
++ }
++
++ /* xdr->tail */
++ if (xdr->tail[0].iov_len)
++ ++elements;
++
++ /* assume 1 SGE is needed for the transport header */
++ return elements >= rdma->sc_max_send_sges;
++}
++
++/* The device is not capable of sending the reply directly.
++ * Assemble the elements of @xdr into the transport header
++ * buffer.
++ */
++static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
++ struct svc_rdma_send_ctxt *ctxt,
++ struct xdr_buf *xdr, __be32 *wr_lst)
++{
++ unsigned char *dst, *tailbase;
++ unsigned int taillen;
++
++ dst = ctxt->sc_xprt_buf;
++ dst += ctxt->sc_sges[0].length;
++
++ memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
++ dst += xdr->head[0].iov_len;
++
++ tailbase = xdr->tail[0].iov_base;
++ taillen = xdr->tail[0].iov_len;
++ if (wr_lst) {
++ u32 xdrpad;
++
++ xdrpad = xdr_padsize(xdr->page_len);
++ if (taillen && xdrpad) {
++ tailbase += xdrpad;
++ taillen -= xdrpad;
++ }
++ } else {
++ unsigned int len, remaining;
++ unsigned long pageoff;
++ struct page **ppages;
++
++ ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
++ pageoff = xdr->page_base & ~PAGE_MASK;
++ remaining = xdr->page_len;
++ while (remaining) {
++ len = min_t(u32, PAGE_SIZE - pageoff, remaining);
++
++ memcpy(dst, page_address(*ppages), len);
++ remaining -= len;
++ dst += len;
++ pageoff = 0;
++ }
++ }
++
++ if (taillen)
++ memcpy(dst, tailbase, taillen);
++
++ ctxt->sc_sges[0].length += xdr->len;
++ ib_dma_sync_single_for_device(rdma->sc_pd->device,
++ ctxt->sc_sges[0].addr,
++ ctxt->sc_sges[0].length,
++ DMA_TO_DEVICE);
++
++ return 0;
++}
++
+ /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
+ * @rdma: controlling transport
+ * @ctxt: send_ctxt for the Send WR
+@@ -585,8 +678,10 @@ int svc_rdma_map_reply_msg(struct svcxpr
+ u32 xdr_pad;
+ int ret;
+
+- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
+- return -EIO;
++ if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
++ return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
++
++ ++ctxt->sc_cur_sge_no;
+ ret = svc_rdma_dma_map_buf(rdma, ctxt,
+ xdr->head[0].iov_base,
+ xdr->head[0].iov_len);
+@@ -617,8 +712,7 @@ int svc_rdma_map_reply_msg(struct svcxpr
+ while (remaining) {
+ len = min_t(u32, PAGE_SIZE - page_off, remaining);
+
+- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
+- return -EIO;
++ ++ctxt->sc_cur_sge_no;
+ ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
+ page_off, len);
+ if (ret < 0)
+@@ -632,8 +726,7 @@ int svc_rdma_map_reply_msg(struct svcxpr
+ len = xdr->tail[0].iov_len;
+ tail:
+ if (len) {
+- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
+- return -EIO;
++ ++ctxt->sc_cur_sge_no;
+ ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
+ if (ret < 0)
+ return ret;
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -478,12 +478,9 @@ static struct svc_xprt *svc_rdma_accept(
+ /* Transport header, head iovec, tail iovec */
+ newxprt->sc_max_send_sges = 3;
+ /* Add one SGE per page list entry */
+- newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
+- if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
+- pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
+- newxprt->sc_max_send_sges);
+- goto errout;
+- }
++ newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
++ if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
++ newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
+ newxprt->sc_max_req_size = svcrdma_max_req_size;
+ newxprt->sc_max_requests = svcrdma_max_requests;
+ newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
--- /dev/null
+From 35e6103861a3a970de6c84688c6e7a1f65b164ca Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 9 Jan 2019 14:37:34 +0100
+Subject: xfrm: refine validation of template and selector families
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 35e6103861a3a970de6c84688c6e7a1f65b164ca upstream.
+
+The check assumes that in transport mode, the first templates family
+must match the address family of the policy selector.
+
+Syzkaller managed to build a template using MODE_ROUTEOPTIMIZATION,
+with ipv4-in-ipv6 chain, leading to following splat:
+
+BUG: KASAN: stack-out-of-bounds in xfrm_state_find+0x1db/0x1854
+Read of size 4 at addr ffff888063e57aa0 by task a.out/2050
+ xfrm_state_find+0x1db/0x1854
+ xfrm_tmpl_resolve+0x100/0x1d0
+ xfrm_resolve_and_create_bundle+0x108/0x1000 [..]
+
+Problem is that addresses point into flowi4 struct, but xfrm_state_find
+treats them as being ipv6 because templ->encap_family is used
+(AF_INET6 in case of reproducer) rather than family (AF_INET).
+
+This patch inverts the logic: Enforce 'template family must match
+selector' EXCEPT for tunnel and BEET mode.
+
+In BEET and Tunnel mode, xfrm_tmpl_resolve_one will have remote/local
+address pointers changed to point at the addresses found in the template,
+rather than the flowi ones, so no oob read will occur.
+
+Reported-by: 3ntr0py1337@gmail.com
+Reported-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/xfrm/xfrm_user.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct
+ if (!ut[i].family)
+ ut[i].family = family;
+
+- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
+- (ut[i].family != prev_family))
+- return -EINVAL;
+-
++ switch (ut[i].mode) {
++ case XFRM_MODE_TUNNEL:
++ case XFRM_MODE_BEET:
++ break;
++ default:
++ if (ut[i].family != prev_family)
++ return -EINVAL;
++ break;
++ }
+ if (ut[i].mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+