--- /dev/null
+From 0a20b8a6838b35dbf19fa611a9d73ced662ebe92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2019 16:27:34 -0500
+Subject: nbd: verify socket is supported during setup
+
+From: Mike Christie <mchristi@redhat.com>
+
+[ Upstream commit cf1b2326b734896734c6e167e41766f9cee7686a ]
+
+nbd requires socket families to support the shutdown method so the nbd
+recv workqueue can be woken up from its sock_recvmsg call. If the socket
+does not support the callout, we will leave recv works running or get
+hangs later when the device or module is removed.
+
+This adds a check during socket connection/reconnection to make sure the
+socket being passed in supports the needed callout.
+
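+As a rough user-space illustration (not part of this patch), the path that
+reaches nbd_add_socket() looks like the sketch below: a client opens a TCP
+connection and then hands the connected fd to the block device via the
+NBD_SET_SOCK ioctl. With this change, an fd whose socket family implements
+shutdown as sock_no_shutdown is rejected with -EINVAL at that point instead
+of being accepted. Address, port and device node are placeholders, and the
+NBD protocol negotiation is omitted:
+
+  /* Sketch only: no NBD negotiation, minimal error handling. */
+  #include <stdio.h>
+  #include <unistd.h>
+  #include <fcntl.h>
+  #include <sys/ioctl.h>
+  #include <sys/socket.h>
+  #include <netinet/in.h>
+  #include <arpa/inet.h>
+  #include <linux/nbd.h>
+
+  int main(void)
+  {
+      struct sockaddr_in addr = { .sin_family = AF_INET,
+                                  .sin_port = htons(10809) };
+      int sock = socket(AF_INET, SOCK_STREAM, 0); /* TCP: has a real shutdown */
+      int nbd;
+
+      inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
+      if (sock < 0 || connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+          return 1;
+
+      nbd = open("/dev/nbd0", O_RDWR);
+      if (nbd < 0)
+          return 1;
+
+      /* nbd_get_socket()/nbd_add_socket() run inside this ioctl. */
+      if (ioctl(nbd, NBD_SET_SOCK, sock) < 0)
+          perror("NBD_SET_SOCK");
+      return 0;
+  }
+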
+Reported-by: syzbot+24c12fa8d218ed26011a@syzkaller.appspotmail.com
+Fixes: e9e006f5fcf2 ("nbd: fix max number of supported devs")
+Tested-by: Richard W.M. Jones <rjones@redhat.com>
+Signed-off-by: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/nbd.c | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index bd164192045b0..9650777d0aaf1 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -935,6 +935,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ return ret;
+ }
+
++static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
++ int *err)
++{
++ struct socket *sock;
++
++ *err = 0;
++ sock = sockfd_lookup(fd, err);
++ if (!sock)
++ return NULL;
++
++ if (sock->ops->shutdown == sock_no_shutdown) {
++ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
++ *err = -EINVAL;
++ return NULL;
++ }
++
++ return sock;
++}
++
+ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+ bool netlink)
+ {
+@@ -944,7 +963,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+ struct nbd_sock *nsock;
+ int err;
+
+- sock = sockfd_lookup(arg, &err);
++ sock = nbd_get_socket(nbd, arg, &err);
+ if (!sock)
+ return err;
+
+@@ -996,7 +1015,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
+ int i;
+ int err;
+
+- sock = sockfd_lookup(arg, &err);
++ sock = nbd_get_socket(nbd, arg, &err);
+ if (!sock)
+ return err;
+
+--
+2.20.1
+
--- /dev/null
+From a3d830d8b47f200911acd995fd8c8c3304b3a094 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Oct 2019 12:10:47 -0500
+Subject: perf annotate: Fix multiple memory and file descriptor leaks
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+[ Upstream commit f948eb45e3af9fb18a0487d0797a773897ef6929 ]
+
+Store SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF in variable *ret*, instead
+of returning in the middle of the function and leaking multiple
+resources: prog_linfo, btf, s and bfdf.
+
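+The fix relies on the usual centralized-cleanup idiom: record the error in
+'ret' and jump to a single exit label that releases everything acquired so
+far, rather than returning from the middle of the function. A minimal,
+stand-alone sketch of the idiom (illustrative names, not the perf code):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  /* Illustrative only: acquire two resources, release both on every path. */
+  static int do_work(const char *path)
+  {
+      int ret = 0;
+      FILE *f = NULL;
+      char *buf = malloc(64);
+
+      if (!buf)
+          return -1;     /* nothing acquired yet, a plain return is fine */
+
+      f = fopen(path, "r");
+      if (!f) {
+          ret = -1;      /* record the error ...              */
+          goto out;      /* ... and take the cleanup path     */
+      }
+
+      /* ... use buf and f ... */
+
+  out:
+      if (f)
+          fclose(f);
+      free(buf);
+      return ret;
+  }
+
+  int main(void)
+  {
+      return do_work("/etc/hostname") ? 1 : 0;
+  }
+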
+Addresses-Coverity-ID: 1454832 ("Structurally dead code")
+Fixes: 11aad897f6d1 ("perf annotate: Don't return -1 for error when doing BPF disassembly")
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20191014171047.GA30850@embeddedor
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/annotate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index fb8756026a805..2e02d2a0176a8 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -1752,7 +1752,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
+ info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
+ dso->bpf_prog.id);
+ if (!info_node) {
+- return SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
++ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+ goto out;
+ }
+ info_linear = info_node->info_linear;
+--
+2.20.1
+
--- /dev/null
+From 6fac142bd2e6227367a16c183d60441300be7e99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Oct 2019 10:33:54 +0200
+Subject: perf/aux: Fix tracking of auxiliary trace buffer allocation
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+[ Upstream commit 5e6c3c7b1ec217c1c4c95d9148182302b9969b97 ]
+
+The following commit from the v5.4 merge window:
+
+ d44248a41337 ("perf/core: Rework memory accounting in perf_mmap()")
+
+... breaks auxiliary trace buffer tracking.
+
+If I run the command 'perf record -e rbd000' to record samples and save
+them in the **auxiliary** trace buffer, then the value of 'locked_vm' becomes
+negative after all trace buffers have been allocated and released:
+
+During allocation the values increase:
+
+ [52.250027] perf_mmap user->locked_vm:0x87 pinned_vm:0x0 ret:0
+ [52.250115] perf_mmap user->locked_vm:0x107 pinned_vm:0x0 ret:0
+ [52.250251] perf_mmap user->locked_vm:0x188 pinned_vm:0x0 ret:0
+ [52.250326] perf_mmap user->locked_vm:0x208 pinned_vm:0x0 ret:0
+ [52.250441] perf_mmap user->locked_vm:0x289 pinned_vm:0x0 ret:0
+ [52.250498] perf_mmap user->locked_vm:0x309 pinned_vm:0x0 ret:0
+ [52.250613] perf_mmap user->locked_vm:0x38a pinned_vm:0x0 ret:0
+ [52.250715] perf_mmap user->locked_vm:0x408 pinned_vm:0x2 ret:0
+ [52.250834] perf_mmap user->locked_vm:0x408 pinned_vm:0x83 ret:0
+ [52.250915] perf_mmap user->locked_vm:0x408 pinned_vm:0x103 ret:0
+ [52.251061] perf_mmap user->locked_vm:0x408 pinned_vm:0x184 ret:0
+ [52.251146] perf_mmap user->locked_vm:0x408 pinned_vm:0x204 ret:0
+ [52.251299] perf_mmap user->locked_vm:0x408 pinned_vm:0x285 ret:0
+ [52.251383] perf_mmap user->locked_vm:0x408 pinned_vm:0x305 ret:0
+ [52.251544] perf_mmap user->locked_vm:0x408 pinned_vm:0x386 ret:0
+ [52.251634] perf_mmap user->locked_vm:0x408 pinned_vm:0x406 ret:0
+ [52.253018] perf_mmap user->locked_vm:0x408 pinned_vm:0x487 ret:0
+ [52.253197] perf_mmap user->locked_vm:0x408 pinned_vm:0x508 ret:0
+ [52.253374] perf_mmap user->locked_vm:0x408 pinned_vm:0x589 ret:0
+ [52.253550] perf_mmap user->locked_vm:0x408 pinned_vm:0x60a ret:0
+ [52.253726] perf_mmap user->locked_vm:0x408 pinned_vm:0x68b ret:0
+ [52.253903] perf_mmap user->locked_vm:0x408 pinned_vm:0x70c ret:0
+ [52.254084] perf_mmap user->locked_vm:0x408 pinned_vm:0x78d ret:0
+ [52.254263] perf_mmap user->locked_vm:0x408 pinned_vm:0x80e ret:0
+
+The value of user->locked_vm increases up to a limit; beyond that, the
+memory is tracked by pinned_vm.
+
+During deallocation the size is subtracted from pinned_vm until
+it hits a limit. Then a larger value is subtracted from locked_vm,
+leading to a huge number (because the type is unsigned):
+
+ [64.267797] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x78d
+ [64.267826] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x70c
+ [64.267848] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x68b
+ [64.267869] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x60a
+ [64.267891] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x589
+ [64.267911] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x508
+ [64.267933] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x487
+ [64.267952] perf_mmap_close mmap_user->locked_vm:0x408 pinned_vm:0x406
+ [64.268883] perf_mmap_close mmap_user->locked_vm:0x307 pinned_vm:0x406
+ [64.269117] perf_mmap_close mmap_user->locked_vm:0x206 pinned_vm:0x406
+ [64.269433] perf_mmap_close mmap_user->locked_vm:0x105 pinned_vm:0x406
+ [64.269536] perf_mmap_close mmap_user->locked_vm:0x4 pinned_vm:0x404
+ [64.269797] perf_mmap_close mmap_user->locked_vm:0xffffffffffffff84 pinned_vm:0x303
+ [64.270105] perf_mmap_close mmap_user->locked_vm:0xffffffffffffff04 pinned_vm:0x202
+ [64.270374] perf_mmap_close mmap_user->locked_vm:0xfffffffffffffe84 pinned_vm:0x101
+ [64.270628] perf_mmap_close mmap_user->locked_vm:0xfffffffffffffe04 pinned_vm:0x0
+
+This value sticks for the user until the system is rebooted, causing
+follow-on system calls that use the locked_vm resource limit to fail.
+
+Note: There is no issue using the normal trace buffer.
+
+In fact the issue is in perf_mmap_close(). During allocation, auxiliary
+trace buffer memory is either tracked as 'extra' and added to 'pinned_vm',
+or tracked as 'user_extra' and added to 'locked_vm'. This applies to
+normal trace buffers and auxiliary trace buffers alike.
+
+However, in perf_mmap_close() all auxiliary trace buffer memory is
+subtracted from 'locked_vm' and never from 'pinned_vm'. This breaks the
+balance.
+
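+A small stand-alone model of the accounting (illustrative only, not the
+kernel code) shows the arithmetic behind the trace above: when charging
+splits pages across two unsigned counters but uncharging always hits the
+same one, that counter wraps around to a huge value:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned long locked_vm = 0, pinned_vm = 0;
+      const unsigned long limit = 0x408;  /* e.g. the RLIMIT_MEMLOCK page limit */
+      const unsigned long chunk = 0x80;   /* pages charged per mapping          */
+      int i;
+
+      /* Charge: fill locked_vm up to the limit, the rest goes to pinned_vm. */
+      for (i = 0; i < 24; i++) {
+          if (locked_vm + chunk <= limit)
+              locked_vm += chunk;
+          else
+              pinned_vm += chunk;
+      }
+
+      /* Buggy uncharge: everything is taken from locked_vm only. */
+      for (i = 0; i < 24; i++)
+          locked_vm -= chunk;
+
+      /* locked_vm has wrapped to a huge unsigned value, pinned_vm never drops. */
+      printf("locked_vm=%#lx pinned_vm=%#lx\n", locked_vm, pinned_vm);
+      return 0;
+  }
+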
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: acme@kernel.org
+Cc: gor@linux.ibm.com
+Cc: hechaol@fb.com
+Cc: heiko.carstens@de.ibm.com
+Cc: linux-perf-users@vger.kernel.org
+Cc: songliubraving@fb.com
+Fixes: d44248a41337 ("perf/core: Rework memory accounting in perf_mmap()")
+Link: https://lkml.kernel.org/r/20191021083354.67868-1-tmricht@linux.ibm.com
+[ Minor readability edits. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 2ff44d7308dd7..53173883513c1 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5524,8 +5524,10 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ perf_pmu_output_stop(event);
+
+ /* now it's safe to free the pages */
+- atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+- atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
++ if (!rb->aux_mmap_locked)
++ atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
++ else
++ atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+
+ /* this has to be the last one */
+ rb_free_aux(rb);
+--
+2.20.1
+
sunrpc-fix-race-to-sk_err-after-xs_error_report.patch
s390-uaccess-avoid-false-positive-compiler-warnings.patch
tracing-initialize-iter-seq-after-zeroing-in-tracing.patch
+perf-annotate-fix-multiple-memory-and-file-descripto.patch
+perf-aux-fix-tracking-of-auxiliary-trace-buffer-allo.patch
+usb-legousbtower-fix-a-signedness-bug-in-tower_probe.patch
+nbd-verify-socket-is-supported-during-setup.patch
--- /dev/null
+From 95392a4c4cc0adba4d9d17c5166c6f56e62646e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2019 17:11:15 +0300
+Subject: USB: legousbtower: fix a signedness bug in tower_probe()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit fd47a417e75e2506eb3672ae569b1c87e3774155 ]
+
+The problem is that sizeof() is unsigned long, so negative error codes
+are type-promoted to high positive values and the condition becomes
+false.
+
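+A short stand-alone example (not the driver code; the struct is a stand-in)
+shows the promotion the fix avoids: comparing a negative int against
+sizeof() converts the int to unsigned long, so the '< sizeof(...)' test can
+never catch the error value, while '!=' still does:
+
+  #include <stdio.h>
+
+  struct reply { char data[8]; };     /* stand-in for the real reply struct */
+
+  int main(void)
+  {
+      int result = -5;   /* e.g. an error code returned by usb_control_msg() */
+
+      /* -5 is converted to unsigned long here and becomes a huge value,
+       * so the "short or failed transfer" branch is never taken. */
+      if (result < sizeof(struct reply))
+          printf("error caught by '<'\n");
+      else
+          printf("error missed: result promoted to %lu\n", (unsigned long)result);
+
+      /* The fix compares for the exact length instead, which still
+       * triggers for negative (promoted) values. */
+      if (result != sizeof(struct reply))
+          printf("error caught by '!='\n");
+      return 0;
+  }
+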
+Fixes: 1d427be4a39d ("USB: legousbtower: fix slab info leak at probe")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Johan Hovold <johan@kernel.org>
+Link: https://lore.kernel.org/r/20191011141115.GA4521@mwanda
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/misc/legousbtower.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
+index 62dab2441ec4f..23061f1526b4e 100644
+--- a/drivers/usb/misc/legousbtower.c
++++ b/drivers/usb/misc/legousbtower.c
+@@ -878,7 +878,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
+ get_version_reply,
+ sizeof(*get_version_reply),
+ 1000);
+- if (result < sizeof(*get_version_reply)) {
++ if (result != sizeof(*get_version_reply)) {
+ if (result >= 0)
+ result = -EIO;
+ dev_err(idev, "get version request failed: %d\n", result);
+--
+2.20.1
+