--- /dev/null
+From 6e66b49392419f3fe134e1be583323ef75da1e4b Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Mon, 9 Mar 2020 21:26:17 -0700
+Subject: blk-mq: Keep set->nr_hw_queues and set->map[].nr_queues in sync
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit 6e66b49392419f3fe134e1be583323ef75da1e4b upstream.
+
+blk_mq_map_queues() and multiple .map_queues() implementations expect that
+set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the number of hardware
+queues. Hence set .nr_queues before calling these functions. This patch
+fixes the following kernel warning:
+
+WARNING: CPU: 0 PID: 2501 at include/linux/cpumask.h:137
+Call Trace:
+ blk_mq_run_hw_queue+0x19d/0x350 block/blk-mq.c:1508
+ blk_mq_run_hw_queues+0x112/0x1a0 block/blk-mq.c:1525
+ blk_mq_requeue_work+0x502/0x780 block/blk-mq.c:775
+ process_one_work+0x9af/0x1740 kernel/workqueue.c:2269
+ worker_thread+0x98/0xe40 kernel/workqueue.c:2415
+ kthread+0x361/0x430 kernel/kthread.c:255
+
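+The dependency is easy to see in a simplified model (illustrative only,
+not the upstream implementation): a map_queues()-style helper spreads
+CPUs across qmap->nr_queues, so a zero or stale nr_queues corrupts
+every subsequent hctx lookup.
+
+  /* toy model: nr_queues must already reflect set->nr_hw_queues */
+  static void toy_map_queues(unsigned int *mq_map, unsigned int nr_cpus,
+                             unsigned int nr_queues)
+  {
+          unsigned int cpu;
+
+          for (cpu = 0; cpu < nr_cpus; cpu++)
+                  mq_map[cpu] = cpu % nr_queues;
+  }
+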
+Fixes: ed76e329d74a ("blk-mq: abstract out queue map") # v5.0
+Reported-by: syzbot+d44e1b26ce5c3e77458d@syzkaller.appspotmail.com
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Cc: Johannes Thumshirn <jth@kernel.org>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2965,6 +2965,14 @@ static int blk_mq_alloc_rq_maps(struct b
+
+ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+ {
++ /*
++ * blk_mq_map_queues() and multiple .map_queues() implementations
++ * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
++ * number of hardware queues.
++ */
++ if (set->nr_maps == 1)
++ set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
++
+ if (set->ops->map_queues && !is_kdump_kernel()) {
+ int i;
+
--- /dev/null
+From 71811cac8532b2387b3414f7cd8fe9e497482864 Mon Sep 17 00:00:00 2001
+From: Qiujun Huang <hqjagain@gmail.com>
+Date: Sun, 8 Mar 2020 17:45:27 +0800
+Subject: Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
+
+From: Qiujun Huang <hqjagain@gmail.com>
+
+commit 71811cac8532b2387b3414f7cd8fe9e497482864 upstream.
+
+There is no need to call 'rfcomm_dlc_put' here, because
+'rfcomm_dlc_exists' does not increase dlc->refcnt.
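+
+The contract, in a minimal sketch (illustrative, not the kernel code):
+a lookup helper that returns a pointer without taking a reference must
+not be paired with a put on that pointer.
+
+  dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
+  if (dlc)                   /* found, but no reference was taken */
+          return -EBUSY;     /* so no rfcomm_dlc_put() on this path */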
+
+Reported-by: syzbot+4496e82090657320efc6@syzkaller.appspotmail.com
+Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
+Suggested-by: Hillf Danton <hdanton@sina.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/rfcomm/tty.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct so
+ dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
+ if (IS_ERR(dlc))
+ return PTR_ERR(dlc);
+- else if (dlc) {
+- rfcomm_dlc_put(dlc);
++ if (dlc)
+ return -EBUSY;
+- }
+ dlc = rfcomm_dlc_alloc(GFP_KERNEL);
+ if (!dlc)
+ return -ENOMEM;
--- /dev/null
+From b139f8b00db4a8ea75a4174346eafa48041aa489 Mon Sep 17 00:00:00 2001
+From: Qiujun Huang <hqjagain@gmail.com>
+Date: Sun, 29 Mar 2020 16:56:47 +0800
+Subject: fbcon: fix null-ptr-deref in fbcon_switch
+
+From: Qiujun Huang <hqjagain@gmail.com>
+
+commit b139f8b00db4a8ea75a4174346eafa48041aa489 upstream.
+
+Set logo_shown to FBCON_LOGO_CANSHOW when the vc is deallocated.
+
+syzkaller report: https://lkml.org/lkml/2020/3/27/403
+general protection fault, probably for non-canonical address
+0xdffffc000000006c: 0000 [#1] SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000360-0x0000000000000367]
+RIP: 0010:fbcon_switch+0x28f/0x1740
+drivers/video/fbdev/core/fbcon.c:2260
+
+Call Trace:
+redraw_screen+0x2a8/0x770 drivers/tty/vt/vt.c:1008
+vc_do_resize+0xfe7/0x1360 drivers/tty/vt/vt.c:1295
+fbcon_init+0x1221/0x1ab0 drivers/video/fbdev/core/fbcon.c:1219
+visual_init+0x305/0x5c0 drivers/tty/vt/vt.c:1062
+do_bind_con_driver+0x536/0x890 drivers/tty/vt/vt.c:3542
+do_take_over_console+0x453/0x5b0 drivers/tty/vt/vt.c:4122
+do_fbcon_takeover+0x10b/0x210 drivers/video/fbdev/core/fbcon.c:588
+fbcon_fb_registered+0x26b/0x340 drivers/video/fbdev/core/fbcon.c:3259
+do_register_framebuffer drivers/video/fbdev/core/fbmem.c:1664 [inline]
+register_framebuffer+0x56e/0x980 drivers/video/fbdev/core/fbmem.c:1832
+dlfb_usb_probe.cold+0x1743/0x1ba3 drivers/video/fbdev/udlfb.c:1735
+usb_probe_interface+0x310/0x800 drivers/usb/core/driver.c:374
+
+Accessing vc_cons[logo_shown].d->vc_top causes the bug.
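+
+A hedged sketch of the failing pattern (illustrative, not the exact
+driver source): the switch path trusts the cached logo_shown index even
+after the console it names has been deallocated.
+
+  if (logo_shown >= 0) {
+          struct vc_data *logo_vc = vc_cons[logo_shown].d; /* now NULL */
+
+          /* ... logo_vc->vc_top ... dereferences a NULL pointer */
+  }
+
+Resetting logo_shown in the deinit path shown below closes that window.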
+
+Reported-by: syzbot+732528bae351682f1f27@syzkaller.appspotmail.com
+Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200329085647.25133-1-hqjagain@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/fbdev/core/fbcon.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1276,6 +1276,9 @@ finished:
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+
++ if (vc->vc_num == logo_shown)
++ logo_shown = FBCON_LOGO_CANSHOW;
++
+ return;
+ }
+
--- /dev/null
+From 987914ab841e2ec281a35b54348ab109b4c0bb4e Mon Sep 17 00:00:00 2001
+From: Avihai Horon <avihaih@mellanox.com>
+Date: Wed, 18 Mar 2020 12:17:41 +0200
+Subject: RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
+
+From: Avihai Horon <avihaih@mellanox.com>
+
+commit 987914ab841e2ec281a35b54348ab109b4c0bb4e upstream.
+
+After a successful allocation of path_rec, num_paths is set to 1, but any
+error after such allocation will leave num_paths uncleared.
+
+This causes a NULL pointer dereference later on. Hence, num_paths
+needs to be set back to 0 if such an error occurs.
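+
+A sketch of the later reader that trusts the counter (illustrative, not
+the exact ucma code):
+
+  for (i = 0; i < route->num_paths; i++)   /* still 1 after the error */
+          ib_copy_path_rec_to_user(..., &route->path_rec[i]);
+                                           /* path_rec is already NULL */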
+
+The following crash from syzkaller revealed it.
+
+ kasan: CONFIG_KASAN_INLINE enabled
+ kasan: GPF could be caused by NULL-ptr deref or user memory access
+ general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
+ CPU: 0 PID: 357 Comm: syz-executor060 Not tainted 4.18.0+ #311
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+ rel-1.11.0-0-g63451fca13-prebuilt.qemu-project.org 04/01/2014
+ RIP: 0010:ib_copy_path_rec_to_user+0x94/0x3e0
+ Code: f1 f1 f1 f1 c7 40 0c 00 00 f4 f4 65 48 8b 04 25 28 00 00 00 48 89
+ 45 c8 31 c0 e8 d7 60 24 ff 48 8d 7b 4c 48 89 f8 48 c1 e8 03 <42> 0f b6
+ 14 30 48 89 f8 83 e0 07 83 c0 03 38 d0 7c 08 84 d2 0f 85
+ RSP: 0018:ffff88006586f980 EFLAGS: 00010207
+ RAX: 0000000000000009 RBX: 0000000000000000 RCX: 1ffff1000d5fe475
+ RDX: ffff8800621e17c0 RSI: ffffffff820d45f9 RDI: 000000000000004c
+ RBP: ffff88006586fa50 R08: ffffed000cb0df73 R09: ffffed000cb0df72
+ R10: ffff88006586fa70 R11: ffffed000cb0df73 R12: 1ffff1000cb0df30
+ R13: ffff88006586fae8 R14: dffffc0000000000 R15: ffff88006aff2200
+ FS: 00000000016fc880(0000) GS:ffff88006d000000(0000)
+ knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000020000040 CR3: 0000000063fec000 CR4: 00000000000006b0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ ? ib_copy_path_rec_from_user+0xcc0/0xcc0
+ ? __mutex_unlock_slowpath+0xfc/0x670
+ ? wait_for_completion+0x3b0/0x3b0
+ ? ucma_query_route+0x818/0xc60
+ ucma_query_route+0x818/0xc60
+ ? ucma_listen+0x1b0/0x1b0
+ ? sched_clock_cpu+0x18/0x1d0
+ ? sched_clock_cpu+0x18/0x1d0
+ ? ucma_listen+0x1b0/0x1b0
+ ? ucma_write+0x292/0x460
+ ucma_write+0x292/0x460
+ ? ucma_close_id+0x60/0x60
+ ? sched_clock_cpu+0x18/0x1d0
+ ? sched_clock_cpu+0x18/0x1d0
+ __vfs_write+0xf7/0x620
+ ? ucma_close_id+0x60/0x60
+ ? kernel_read+0x110/0x110
+ ? time_hardirqs_on+0x19/0x580
+ ? lock_acquire+0x18b/0x3a0
+ ? finish_task_switch+0xf3/0x5d0
+ ? _raw_spin_unlock_irq+0x29/0x40
+ ? _raw_spin_unlock_irq+0x29/0x40
+ ? finish_task_switch+0x1be/0x5d0
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? security_file_permission+0x172/0x1e0
+ vfs_write+0x192/0x460
+ ksys_write+0xc6/0x1a0
+ ? __ia32_sys_read+0xb0/0xb0
+ ? entry_SYSCALL_64_after_hwframe+0x3e/0xbe
+ ? do_syscall_64+0x1d/0x470
+ do_syscall_64+0x9e/0x470
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices")
+Link: https://lore.kernel.org/r/20200318101741.47211-1-leon@kernel.org
+Signed-off-by: Avihai Horon <avihaih@mellanox.com>
+Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/cma.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2938,6 +2938,7 @@ static int cma_resolve_iboe_route(struct
+ err2:
+ kfree(route->path_rec);
+ route->path_rec = NULL;
++ route->num_paths = 0;
+ err1:
+ kfree(work);
+ return ret;
--- /dev/null
+From 32ac9e4399b12d3e54d312a0e0e30ed5cd19bd4e Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@ziepe.ca>
+Date: Thu, 27 Feb 2020 16:36:51 -0400
+Subject: RDMA/cma: Teach lockdep about the order of rtnl and lock
+
+From: Jason Gunthorpe <jgg@mellanox.com>
+
+commit 32ac9e4399b12d3e54d312a0e0e30ed5cd19bd4e upstream.
+
+This lock ordering only happens when bonding is enabled and a certain
+bonding-related event fires. However, since it can happen, this is a
+global restriction on lock ordering.
+
+Teach lockdep about the order directly and unconditionally so bugs here
+are found quickly.
+
+See https://syzkaller.appspot.com/bug?extid=55de90ab5f44172b0c90
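+
+The inversion that the priming catches, as a two-thread sketch (here
+&lock stands for the cma.c global mutex):
+
+  /* bonding event path:           some other path:
+   *   rtnl_lock();                  mutex_lock(&lock);
+   *   mutex_lock(&lock);            rtnl_lock();     <- AB-BA deadlock
+   */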
+
+Link: https://lore.kernel.org/r/20200227203651.GA27185@ziepe.ca
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/cma.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4746,6 +4746,19 @@ static int __init cma_init(void)
+ {
+ int ret;
+
++ /*
++ * There is a rare lock ordering dependency in cma_netdev_callback()
++ * that only happens when bonding is enabled. Teach lockdep that rtnl
++ * must never be nested under lock so it can find these without having
++ * to test with bonding.
++ */
++ if (IS_ENABLED(CONFIG_LOCKDEP)) {
++ rtnl_lock();
++ mutex_lock(&lock);
++ mutex_unlock(&lock);
++ rtnl_unlock();
++ }
++
+ cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
+ if (!cma_wq)
+ return -ENOMEM;
--- /dev/null
+From 33fb27fd54465c74cbffba6315b2f043e90cec4c Mon Sep 17 00:00:00 2001
+From: Bernard Metzler <bmt@zurich.ibm.com>
+Date: Fri, 28 Feb 2020 18:35:34 +0100
+Subject: RDMA/siw: Fix passive connection establishment
+
+From: Bernard Metzler <bmt@zurich.ibm.com>
+
+commit 33fb27fd54465c74cbffba6315b2f043e90cec4c upstream.
+
+Holding the rtnl_lock while iterating a devices interface address list
+potentially causes deadlocks with the cma_netdev_callback. While this was
+implemented to limit the scope of a wildcard listen to addresses of the
+current device only, a better solution limits the scope of the socket to
+the device. This completely avoiding locking, and also results in
+significant code simplification.
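+
+The heart of the new approach, simplified from the hunks below: bind
+the listening socket to the device instead of walking its address list.
+
+  /* For a wildcard address, limit binding to the current device only */
+  if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+          s->sk->sk_bound_dev_if = sdev->netdev->ifindex;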
+
+Fixes: c421651fa229 ("RDMA/siw: Add missing rtnl_lock around access to ifa")
+Link: https://lore.kernel.org/r/20200228173534.26815-1-bmt@zurich.ibm.com
+Reported-by: syzbot+55de90ab5f44172b0c90@syzkaller.appspotmail.com
+Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
+Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/sw/siw/siw_cm.c | 137 ++++++++-----------------------------
+ 1 file changed, 31 insertions(+), 106 deletions(-)
+
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1769,14 +1769,23 @@ int siw_reject(struct iw_cm_id *id, cons
+ return 0;
+ }
+
+-static int siw_listen_address(struct iw_cm_id *id, int backlog,
+- struct sockaddr *laddr, int addr_family)
++/*
++ * siw_create_listen - Create resources for a listener's IWCM ID @id
++ *
++ * Starts listen on the socket address id->local_addr.
++ *
++ */
++int siw_create_listen(struct iw_cm_id *id, int backlog)
+ {
+ struct socket *s;
+ struct siw_cep *cep = NULL;
+ struct siw_device *sdev = to_siw_dev(id->device);
++ int addr_family = id->local_addr.ss_family;
+ int rv = 0, s_val;
+
++ if (addr_family != AF_INET && addr_family != AF_INET6)
++ return -EAFNOSUPPORT;
++
+ rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (rv < 0)
+ return rv;
+@@ -1791,9 +1800,25 @@ static int siw_listen_address(struct iw_
+ siw_dbg(id->device, "setsockopt error: %d\n", rv);
+ goto error;
+ }
+- rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
+- sizeof(struct sockaddr_in) :
+- sizeof(struct sockaddr_in6));
++ if (addr_family == AF_INET) {
++ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
++
++ /* For wildcard addr, limit binding to current device only */
++ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
++ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
++
++ rv = s->ops->bind(s, (struct sockaddr *)laddr,
++ sizeof(struct sockaddr_in));
++ } else {
++ struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
++
++ /* For wildcard addr, limit binding to current device only */
++ if (ipv6_addr_any(&laddr->sin6_addr))
++ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
++
++ rv = s->ops->bind(s, (struct sockaddr *)laddr,
++ sizeof(struct sockaddr_in6));
++ }
+ if (rv) {
+ siw_dbg(id->device, "socket bind error: %d\n", rv);
+ goto error;
+@@ -1852,7 +1877,7 @@ static int siw_listen_address(struct iw_
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = SIW_EPSTATE_LISTENING;
+
+- siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
++ siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
+
+ return 0;
+
+@@ -1910,106 +1935,6 @@ static void siw_drop_listeners(struct iw
+ }
+ }
+
+-/*
+- * siw_create_listen - Create resources for a listener's IWCM ID @id
+- *
+- * Listens on the socket address id->local_addr.
+- *
+- * If the listener's @id provides a specific local IP address, at most one
+- * listening socket is created and associated with @id.
+- *
+- * If the listener's @id provides the wildcard (zero) local IP address,
+- * a separate listen is performed for each local IP address of the device
+- * by creating a listening socket and binding to that local IP address.
+- *
+- */
+-int siw_create_listen(struct iw_cm_id *id, int backlog)
+-{
+- struct net_device *dev = to_siw_dev(id->device)->netdev;
+- int rv = 0, listeners = 0;
+-
+- siw_dbg(id->device, "backlog %d\n", backlog);
+-
+- /*
+- * For each attached address of the interface, create a
+- * listening socket, if id->local_addr is the wildcard
+- * IP address or matches the IP address.
+- */
+- if (id->local_addr.ss_family == AF_INET) {
+- struct in_device *in_dev = in_dev_get(dev);
+- struct sockaddr_in s_laddr;
+- const struct in_ifaddr *ifa;
+-
+- if (!in_dev) {
+- rv = -ENODEV;
+- goto out;
+- }
+- memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
+-
+- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
+-
+- rtnl_lock();
+- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
+- if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) ||
+- s_laddr.sin_addr.s_addr == ifa->ifa_address) {
+- s_laddr.sin_addr.s_addr = ifa->ifa_address;
+-
+- rv = siw_listen_address(id, backlog,
+- (struct sockaddr *)&s_laddr,
+- AF_INET);
+- if (!rv)
+- listeners++;
+- }
+- }
+- rtnl_unlock();
+- in_dev_put(in_dev);
+- } else if (id->local_addr.ss_family == AF_INET6) {
+- struct inet6_dev *in6_dev = in6_dev_get(dev);
+- struct inet6_ifaddr *ifp;
+- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
+-
+- if (!in6_dev) {
+- rv = -ENODEV;
+- goto out;
+- }
+- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
+-
+- rtnl_lock();
+- list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+- if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
+- continue;
+- if (ipv6_addr_any(&s_laddr->sin6_addr) ||
+- ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
+- struct sockaddr_in6 bind_addr = {
+- .sin6_family = AF_INET6,
+- .sin6_port = s_laddr->sin6_port,
+- .sin6_flowinfo = 0,
+- .sin6_addr = ifp->addr,
+- .sin6_scope_id = dev->ifindex };
+-
+- rv = siw_listen_address(id, backlog,
+- (struct sockaddr *)&bind_addr,
+- AF_INET6);
+- if (!rv)
+- listeners++;
+- }
+- }
+- rtnl_unlock();
+- in6_dev_put(in6_dev);
+- } else {
+- rv = -EAFNOSUPPORT;
+- }
+-out:
+- if (listeners)
+- rv = 0;
+- else if (!rv)
+- rv = -EINVAL;
+-
+- siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
+-
+- return rv;
+-}
+-
+ int siw_destroy_listen(struct iw_cm_id *id)
+ {
+ if (!id->provider_data) {
--- /dev/null
+From 7c11910783a1ea17e88777552ef146cace607b3c Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@ziepe.ca>
+Date: Tue, 18 Feb 2020 15:45:38 -0400
+Subject: RDMA/ucma: Put a lock around every call to the rdma_cm layer
+
+From: Jason Gunthorpe <jgg@mellanox.com>
+
+commit 7c11910783a1ea17e88777552ef146cace607b3c upstream.
+
+The rdma_cm must be used single threaded.
+
+This appears to be a bug in the design, as it does have lots of locking
+that seems like it should allow concurrency. However, when all is said
+and done, every single place that uses the cma_exch() scheme is broken,
+and all of the ucma's unlocked reads of the cm_id data are wrong too.
+
+syzkaller has been finding endless bugs related to this.
+
+Fixing this in any elegant way would be an enormous amount of work.
+Take a very big hammer and put a mutex around everything to do with the
+ucma_context at the top of every syscall.
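+
+The pattern applied to each handler, simplified from the hunks below:
+
+  ctx = ucma_get_ctx(file, cmd.id);
+  if (IS_ERR(ctx))
+          return PTR_ERR(ctx);
+
+  mutex_lock(&ctx->mutex);
+  ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *)&cmd.addr);
+  mutex_unlock(&ctx->mutex);
+
+  ucma_put_ctx(ctx);
+  return ret;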
+
+Fixes: 75216638572f ("RDMA/cma: Export rdma cm interface to userspace")
+Link: https://lore.kernel.org/r/20200218210432.GA31966@ziepe.ca
+Reported-by: syzbot+adb15cf8c2798e4e0db4@syzkaller.appspotmail.com
+Reported-by: syzbot+e5579222b6a3edd96522@syzkaller.appspotmail.com
+Reported-by: syzbot+4b628fcc748474003457@syzkaller.appspotmail.com
+Reported-by: syzbot+29ee8f76017ce6cf03da@syzkaller.appspotmail.com
+Reported-by: syzbot+6956235342b7317ec564@syzkaller.appspotmail.com
+Reported-by: syzbot+b358909d8d01556b790b@syzkaller.appspotmail.com
+Reported-by: syzbot+6b46b135602a3f3ac99e@syzkaller.appspotmail.com
+Reported-by: syzbot+8458d13b13562abf6b77@syzkaller.appspotmail.com
+Reported-by: syzbot+bd034f3fdc0402e942ed@syzkaller.appspotmail.com
+Reported-by: syzbot+c92378b32760a4eef756@syzkaller.appspotmail.com
+Reported-by: syzbot+68b44a1597636e0b342c@syzkaller.appspotmail.com
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/ucma.c | 49 +++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 47 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -91,6 +91,7 @@ struct ucma_context {
+
+ struct ucma_file *file;
+ struct rdma_cm_id *cm_id;
++ struct mutex mutex;
+ u64 uid;
+
+ struct list_head list;
+@@ -216,6 +217,7 @@ static struct ucma_context *ucma_alloc_c
+ init_completion(&ctx->comp);
+ INIT_LIST_HEAD(&ctx->mc_list);
+ ctx->file = file;
++ mutex_init(&ctx->mutex);
+
+ if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
+ goto error;
+@@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_con
+ }
+
+ events_reported = ctx->events_reported;
++ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
+ }
+@@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++ mutex_unlock(&ctx->mutex);
++
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_fil
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct uc
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct u
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ memset(&resp, 0, sizeof resp);
+ addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
+ memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
+@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct u
+ ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+
+ out:
++ mutex_unlock(&ctx->mutex);
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+@@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_fi
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ switch (cmd.option) {
+ case RDMA_USER_CM_QUERY_ADDR:
+ ret = ucma_query_addr(ctx, response, out_len);
+@@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_fi
+ ret = -ENOSYS;
+ break;
+ }
++ mutex_unlock(&ctx->mutex);
+
+ ucma_put_ctx(ctx);
+ return ret;
+@@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_
+ return PTR_ERR(ctx);
+
+ ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_connect(ctx->cm_id, &conn_param);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_f
+
+ ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+ cmd.backlog : max_backlog;
++ mutex_lock(&ctx->mutex);
+ ret = rdma_listen(ctx->cm_id, ctx->backlog);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_f
+ if (cmd.conn_param.valid) {
+ ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ mutex_lock(&file->mut);
++ mutex_lock(&ctx->mutex);
+ ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
++ mutex_unlock(&ctx->mutex);
+ if (!ret)
+ ctx->uid = cmd.uid;
+ mutex_unlock(&file->mut);
+- } else
++ } else {
++ mutex_lock(&ctx->mutex);
+ ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+-
++ mutex_unlock(&ctx->mutex);
++ }
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_f
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct uc
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_disconnect(ctx->cm_id);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct
+ resp.qp_attr_mask = 0;
+ memset(&qp_attr, 0, sizeof qp_attr);
+ qp_attr.qp_state = cmd.qp_state;
++ mutex_lock(&ctx->mutex);
+ ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
++ mutex_unlock(&ctx->mutex);
+ if (ret)
+ goto out;
+
+@@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_
+ struct sa_path_rec opa;
+
+ sa_convert_path_ib_to_opa(&opa, &sa_path);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
++ mutex_unlock(&ctx->mutex);
+ } else {
++ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
++ mutex_unlock(&ctx->mutex);
+ }
+ if (ret)
+ return ret;
+@@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct
+
+ switch (level) {
+ case RDMA_OPTION_ID:
++ mutex_lock(&ctx->mutex);
+ ret = ucma_set_option_id(ctx, optname, optval, optlen);
++ mutex_unlock(&ctx->mutex);
+ break;
+ case RDMA_OPTION_IB:
+ ret = ucma_set_option_ib(ctx, optname, optval, optlen);
+@@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_f
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
++ mutex_unlock(&ctx->mutex);
+
+ ucma_put_ctx(ctx);
+ return ret;
+@@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct
+ mc->join_state = join_state;
+ mc->uid = cmd->uid;
+ memcpy(&mc->addr, addr, cmd->addr_size);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+ join_state, mc);
++ mutex_unlock(&ctx->mutex);
+ if (ret)
+ goto err2;
+
+@@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(stru
+ goto out;
+ }
+
++ mutex_lock(&mc->ctx->mutex);
+ rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
++ mutex_unlock(&mc->ctx->mutex);
++
+ mutex_lock(&mc->ctx->file->mut);
+ ucma_cleanup_mc_events(mc);
+ list_del(&mc->list);
uapi-rename-ext2_swab-to-swab-and-share-globally-in-swab.h.patch
slub-improve-bit-diffusion-for-freelist-ptr-obfuscation.patch
include-uapi-linux-swab.h-fix-userspace-breakage-use-__bits_per_long-for-swap.patch
+ubi-fastmap-free-unused-fastmap-anchor-peb-during-detach.patch
+rdma-ucma-put-a-lock-around-every-call-to-the-rdma_cm-layer.patch
+rdma-cma-teach-lockdep-about-the-order-of-rtnl-and-lock.patch
+rdma-siw-fix-passive-connection-establishment.patch
+bluetooth-rfcomm-fix-odebug-bug-in-rfcomm_dev_ioctl.patch
+rdma-cm-update-num_paths-in-cma_resolve_iboe_route-error-flow.patch
+blk-mq-keep-set-nr_hw_queues-and-set-map.nr_queues-in-sync.patch
+fbcon-fix-null-ptr-deref-in-fbcon_switch.patch
tools-accounting-getdelays.c-fix-netlink-attribute-length.patch
hwrng-imx-rngc-fix-an-error-path.patch
acpi-pm-add-acpi_register_wakeup_handler.patch
--- /dev/null
+From c16f39d14a7e0ec59881fbdb22ae494907534384 Mon Sep 17 00:00:00 2001
+From: Hou Tao <houtao1@huawei.com>
+Date: Mon, 10 Feb 2020 21:26:34 +0800
+Subject: ubi: fastmap: Free unused fastmap anchor peb during detach
+
+From: Hou Tao <houtao1@huawei.com>
+
+commit c16f39d14a7e0ec59881fbdb22ae494907534384 upstream.
+
+When CONFIG_MTD_UBI_FASTMAP is enabled, fm_anchor will be assigned
+a free PEB during ubi_wl_init() or ubi_update_fastmap(). However,
+if fastmap is not used or is disabled on the MTD device, the
+ubi_wl_entry related to that PEB will not be freed during detach.
+
+So fix it by freeing the unused fastmap anchor PEB during detach.
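+
+The shape of the fix, simplified from the hunk below: hand the unused
+anchor PEB back to the free tree on close so its ubi_wl_entry is
+reclaimed.
+
+  if (ubi->fm_anchor) {
+          return_unused_peb(ubi, ubi->fm_anchor);
+          ubi->fm_anchor = NULL;
+  }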
+
+Fixes: f9c34bb52997 ("ubi: Fix producing anchor PEBs")
+Reported-by: syzbot+f317896aae32eb281a58@syzkaller.appspotmail.com
+Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/fastmap-wl.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_
+ return victim;
+ }
+
++static inline void return_unused_peb(struct ubi_device *ubi,
++ struct ubi_wl_entry *e)
++{
++ wl_tree_add(e, &ubi->free);
++ ubi->free_count++;
++}
++
+ /**
+ * return_unused_pool_pebs - returns unused PEB to the free tree.
+ * @ubi: UBI device description object
+@@ -52,8 +59,7 @@ static void return_unused_pool_pebs(stru
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+- wl_tree_add(e, &ubi->free);
+- ubi->free_count++;
++ return_unused_peb(ubi, e);
+ }
+ }
+
+@@ -361,6 +367,11 @@ static void ubi_fastmap_close(struct ubi
+ return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
++ if (ubi->fm_anchor) {
++ return_unused_peb(ubi, ubi->fm_anchor);
++ ubi->fm_anchor = NULL;
++ }
++
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kfree(ubi->fm->e[i]);