--- /dev/null
+From b139f8b00db4a8ea75a4174346eafa48041aa489 Mon Sep 17 00:00:00 2001
+From: Qiujun Huang <hqjagain@gmail.com>
+Date: Sun, 29 Mar 2020 16:56:47 +0800
+Subject: fbcon: fix null-ptr-deref in fbcon_switch
+
+From: Qiujun Huang <hqjagain@gmail.com>
+
+commit b139f8b00db4a8ea75a4174346eafa48041aa489 upstream.
+
+Set logo_shown to FBCON_LOGO_CANSHOW when the vc is deallocated.
+
+syzkaller report: https://lkml.org/lkml/2020/3/27/403
+general protection fault, probably for non-canonical address
+0xdffffc000000006c: 0000 [#1] SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000360-0x0000000000000367]
+RIP: 0010:fbcon_switch+0x28f/0x1740
+drivers/video/fbdev/core/fbcon.c:2260
+
+Call Trace:
+redraw_screen+0x2a8/0x770 drivers/tty/vt/vt.c:1008
+vc_do_resize+0xfe7/0x1360 drivers/tty/vt/vt.c:1295
+fbcon_init+0x1221/0x1ab0 drivers/video/fbdev/core/fbcon.c:1219
+visual_init+0x305/0x5c0 drivers/tty/vt/vt.c:1062
+do_bind_con_driver+0x536/0x890 drivers/tty/vt/vt.c:3542
+do_take_over_console+0x453/0x5b0 drivers/tty/vt/vt.c:4122
+do_fbcon_takeover+0x10b/0x210 drivers/video/fbdev/core/fbcon.c:588
+fbcon_fb_registered+0x26b/0x340 drivers/video/fbdev/core/fbcon.c:3259
+do_register_framebuffer drivers/video/fbdev/core/fbmem.c:1664 [inline]
+register_framebuffer+0x56e/0x980 drivers/video/fbdev/core/fbmem.c:1832
+dlfb_usb_probe.cold+0x1743/0x1ba3 drivers/video/fbdev/udlfb.c:1735
+usb_probe_interface+0x310/0x800 drivers/usb/core/driver.c:374
+
+Accessing vc_cons[logo_shown].d->vc_top after that console has been
+deallocated is what causes the bug.
+
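+The failure mode is a stale index into an array of pointers: logo_shown
+remembers which console owns the boot logo, but nothing cleared it when
+that console was freed, so fbcon_switch() later dereferenced
+vc_cons[logo_shown].d after it had become NULL. A minimal standalone C
+model of the pattern (hypothetical names, not the kernel code):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define LOGO_CANSHOW -1           /* models FBCON_LOGO_CANSHOW */
+
+  struct vc { int vc_top; };        /* stand-in for struct vc_data */
+
+  static struct vc *consoles[4];    /* stand-in for vc_cons[].d */
+  static int logo_owner = LOGO_CANSHOW;
+
+  static void dealloc_console(int idx)
+  {
+          free(consoles[idx]);
+          consoles[idx] = NULL;
+          if (logo_owner == idx)    /* the fix: forget the stale index */
+                  logo_owner = LOGO_CANSHOW;
+  }
+
+  int main(void)
+  {
+          consoles[1] = calloc(1, sizeof(*consoles[1]));
+          logo_owner = 1;           /* logo shown on console 1 */
+
+          dealloc_console(1);       /* the console goes away */
+
+          /* models fbcon_switch(): without the fix this is a NULL deref */
+          if (logo_owner != LOGO_CANSHOW)
+                  printf("vc_top = %d\n", consoles[logo_owner]->vc_top);
+          return 0;
+  }
+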
+Reported-by: syzbot+732528bae351682f1f27@syzkaller.appspotmail.com
+Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200329085647.25133-1-hqjagain@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/fbdev/core/fbcon.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1243,6 +1243,9 @@ finished:
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+
++ if (vc->vc_num == logo_shown)
++ logo_shown = FBCON_LOGO_CANSHOW;
++
+ return;
+ }
+
--- /dev/null
+From 987914ab841e2ec281a35b54348ab109b4c0bb4e Mon Sep 17 00:00:00 2001
+From: Avihai Horon <avihaih@mellanox.com>
+Date: Wed, 18 Mar 2020 12:17:41 +0200
+Subject: RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
+
+From: Avihai Horon <avihaih@mellanox.com>
+
+commit 987914ab841e2ec281a35b54348ab109b4c0bb4e upstream.
+
+After a successful allocation of path_rec, num_paths is set to 1, but any
+error after that allocation leaves num_paths uncleared.
+
+This leads to dereferencing a NULL pointer later on, so num_paths needs to
+be set back to 0 if such an error occurs.
+
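+The broken invariant is that num_paths must describe how many entries
+path_rec actually holds; once the error path frees path_rec, any consumer
+that still sees num_paths == 1 walks a NULL array. A standalone C sketch
+of the pattern (hypothetical names, loosely modeled on how the query path
+copies the records out):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct path_rec { int dlid; };
+
+  struct route {
+          struct path_rec *path_rec;
+          int num_paths;
+  };
+
+  /* models the error flow in cma_resolve_iboe_route() */
+  static void resolve_route_fails(struct route *r)
+  {
+          r->path_rec = calloc(1, sizeof(*r->path_rec));
+          r->num_paths = 1;         /* set right after the allocation */
+
+          /* ... a later step fails ... */
+          free(r->path_rec);
+          r->path_rec = NULL;
+          r->num_paths = 0;         /* the fix: keep the count honest */
+  }
+
+  int main(void)
+  {
+          struct route r = { 0 };
+          int i;
+
+          resolve_route_fails(&r);
+
+          /* models the query path: trusts num_paths to bound the array */
+          for (i = 0; i < r.num_paths; i++)
+                  printf("dlid %d\n", r.path_rec[i].dlid);
+          return 0;
+  }
+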
+The following crash from syzkaller revealed it.
+
+ kasan: CONFIG_KASAN_INLINE enabled
+ kasan: GPF could be caused by NULL-ptr deref or user memory access
+ general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI
+ CPU: 0 PID: 357 Comm: syz-executor060 Not tainted 4.18.0+ #311
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+ rel-1.11.0-0-g63451fca13-prebuilt.qemu-project.org 04/01/2014
+ RIP: 0010:ib_copy_path_rec_to_user+0x94/0x3e0
+ Code: f1 f1 f1 f1 c7 40 0c 00 00 f4 f4 65 48 8b 04 25 28 00 00 00 48 89
+ 45 c8 31 c0 e8 d7 60 24 ff 48 8d 7b 4c 48 89 f8 48 c1 e8 03 <42> 0f b6
+ 14 30 48 89 f8 83 e0 07 83 c0 03 38 d0 7c 08 84 d2 0f 85
+ RSP: 0018:ffff88006586f980 EFLAGS: 00010207
+ RAX: 0000000000000009 RBX: 0000000000000000 RCX: 1ffff1000d5fe475
+ RDX: ffff8800621e17c0 RSI: ffffffff820d45f9 RDI: 000000000000004c
+ RBP: ffff88006586fa50 R08: ffffed000cb0df73 R09: ffffed000cb0df72
+ R10: ffff88006586fa70 R11: ffffed000cb0df73 R12: 1ffff1000cb0df30
+ R13: ffff88006586fae8 R14: dffffc0000000000 R15: ffff88006aff2200
+ FS: 00000000016fc880(0000) GS:ffff88006d000000(0000)
+ knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000020000040 CR3: 0000000063fec000 CR4: 00000000000006b0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ ? ib_copy_path_rec_from_user+0xcc0/0xcc0
+ ? __mutex_unlock_slowpath+0xfc/0x670
+ ? wait_for_completion+0x3b0/0x3b0
+ ? ucma_query_route+0x818/0xc60
+ ucma_query_route+0x818/0xc60
+ ? ucma_listen+0x1b0/0x1b0
+ ? sched_clock_cpu+0x18/0x1d0
+ ? sched_clock_cpu+0x18/0x1d0
+ ? ucma_listen+0x1b0/0x1b0
+ ? ucma_write+0x292/0x460
+ ucma_write+0x292/0x460
+ ? ucma_close_id+0x60/0x60
+ ? sched_clock_cpu+0x18/0x1d0
+ ? sched_clock_cpu+0x18/0x1d0
+ __vfs_write+0xf7/0x620
+ ? ucma_close_id+0x60/0x60
+ ? kernel_read+0x110/0x110
+ ? time_hardirqs_on+0x19/0x580
+ ? lock_acquire+0x18b/0x3a0
+ ? finish_task_switch+0xf3/0x5d0
+ ? _raw_spin_unlock_irq+0x29/0x40
+ ? _raw_spin_unlock_irq+0x29/0x40
+ ? finish_task_switch+0x1be/0x5d0
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? security_file_permission+0x172/0x1e0
+ vfs_write+0x192/0x460
+ ksys_write+0xc6/0x1a0
+ ? __ia32_sys_read+0xb0/0xb0
+ ? entry_SYSCALL_64_after_hwframe+0x3e/0xbe
+ ? do_syscall_64+0x1d/0x470
+ do_syscall_64+0x9e/0x470
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices")
+Link: https://lore.kernel.org/r/20200318101741.47211-1-leon@kernel.org
+Signed-off-by: Avihai Horon <avihaih@mellanox.com>
+Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/cma.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2753,6 +2753,7 @@ static int cma_resolve_iboe_route(struct
+ err2:
+ kfree(route->path_rec);
+ route->path_rec = NULL;
++ route->num_paths = 0;
+ err1:
+ kfree(work);
+ return ret;
--- /dev/null
+From 7c11910783a1ea17e88777552ef146cace607b3c Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@ziepe.ca>
+Date: Tue, 18 Feb 2020 15:45:38 -0400
+Subject: RDMA/ucma: Put a lock around every call to the rdma_cm layer
+
+From: Jason Gunthorpe <jgg@mellanox.com>
+
+commit 7c11910783a1ea17e88777552ef146cace607b3c upstream.
+
+The rdma_cm layer must be used single-threaded.
+
+This appears to be a bug in the design, as it does have lots of locking
+that looks like it should allow concurrency. However, when all is said and
+done, every single place that uses the cma_exch() scheme is broken, and
+all the unlocked reads of cm_id data from the ucma layer are wrong too.
+
+syzkaller has been finding endless bugs related to this.
+
+Fixing this in any elegant way would be an enormous amount of work. Take a
+very big hammer and put a mutex around everything to do with the
+ucma_context at the top of every syscall.
+
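+The shape of the change is the same at every entry point: resolve the
+ucma_context, hold its new per-context mutex for the duration of the
+rdma_cm call, drop it, then put the context. A standalone sketch of that
+wrapper pattern using pthreads (hypothetical names; the real code uses the
+kernel mutex API, as in the hunks below):
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  /* models struct ucma_context with its new per-context mutex */
+  struct ctx {
+          pthread_mutex_t mutex;
+          int cm_state;
+  };
+
+  /* models an rdma_cm call that must not run concurrently per context */
+  static int rdma_cm_op(struct ctx *c)
+  {
+          c->cm_state++;
+          return 0;
+  }
+
+  /* models the pattern added to each ucma_* handler */
+  static int ucma_handler(struct ctx *c)
+  {
+          int ret;
+
+          pthread_mutex_lock(&c->mutex);   /* serialize the rdma_cm call */
+          ret = rdma_cm_op(c);
+          pthread_mutex_unlock(&c->mutex);
+          return ret;
+  }
+
+  int main(void)
+  {
+          struct ctx c = { .mutex = PTHREAD_MUTEX_INITIALIZER, .cm_state = 0 };
+
+          ucma_handler(&c);
+          printf("cm_state = %d\n", c.cm_state);
+          return 0;
+  }
+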
+Fixes: 75216638572f ("RDMA/cma: Export rdma cm interface to userspace")
+Link: https://lore.kernel.org/r/20200218210432.GA31966@ziepe.ca
+Reported-by: syzbot+adb15cf8c2798e4e0db4@syzkaller.appspotmail.com
+Reported-by: syzbot+e5579222b6a3edd96522@syzkaller.appspotmail.com
+Reported-by: syzbot+4b628fcc748474003457@syzkaller.appspotmail.com
+Reported-by: syzbot+29ee8f76017ce6cf03da@syzkaller.appspotmail.com
+Reported-by: syzbot+6956235342b7317ec564@syzkaller.appspotmail.com
+Reported-by: syzbot+b358909d8d01556b790b@syzkaller.appspotmail.com
+Reported-by: syzbot+6b46b135602a3f3ac99e@syzkaller.appspotmail.com
+Reported-by: syzbot+8458d13b13562abf6b77@syzkaller.appspotmail.com
+Reported-by: syzbot+bd034f3fdc0402e942ed@syzkaller.appspotmail.com
+Reported-by: syzbot+c92378b32760a4eef756@syzkaller.appspotmail.com
+Reported-by: syzbot+68b44a1597636e0b342c@syzkaller.appspotmail.com
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/ucma.c | 49 +++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 47 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -89,6 +89,7 @@ struct ucma_context {
+
+ struct ucma_file *file;
+ struct rdma_cm_id *cm_id;
++ struct mutex mutex;
+ u64 uid;
+
+ struct list_head list;
+@@ -215,6 +216,7 @@ static struct ucma_context *ucma_alloc_c
+ init_completion(&ctx->comp);
+ INIT_LIST_HEAD(&ctx->mc_list);
+ ctx->file = file;
++ mutex_init(&ctx->mutex);
+
+ mutex_lock(&mut);
+ ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+@@ -596,6 +598,7 @@ static int ucma_free_ctx(struct ucma_con
+ }
+
+ events_reported = ctx->events_reported;
++ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
+ }
+@@ -665,7 +668,10 @@ static ssize_t ucma_bind_ip(struct ucma_
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++ mutex_unlock(&ctx->mutex);
++
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -688,7 +694,9 @@ static ssize_t ucma_bind(struct ucma_fil
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -712,8 +720,10 @@ static ssize_t ucma_resolve_ip(struct uc
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -738,8 +748,10 @@ static ssize_t ucma_resolve_addr(struct
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -759,7 +771,9 @@ static ssize_t ucma_resolve_route(struct
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -848,6 +862,7 @@ static ssize_t ucma_query_route(struct u
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ memset(&resp, 0, sizeof resp);
+ addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
+ memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
+@@ -871,6 +886,7 @@ static ssize_t ucma_query_route(struct u
+ ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+
+ out:
++ mutex_unlock(&ctx->mutex);
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+@@ -1022,6 +1038,7 @@ static ssize_t ucma_query(struct ucma_fi
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ switch (cmd.option) {
+ case RDMA_USER_CM_QUERY_ADDR:
+ ret = ucma_query_addr(ctx, response, out_len);
+@@ -1036,6 +1053,7 @@ static ssize_t ucma_query(struct ucma_fi
+ ret = -ENOSYS;
+ break;
+ }
++ mutex_unlock(&ctx->mutex);
+
+ ucma_put_ctx(ctx);
+ return ret;
+@@ -1076,7 +1094,9 @@ static ssize_t ucma_connect(struct ucma_
+ return PTR_ERR(ctx);
+
+ ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_connect(ctx->cm_id, &conn_param);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1097,7 +1117,9 @@ static ssize_t ucma_listen(struct ucma_f
+
+ ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+ cmd.backlog : max_backlog;
++ mutex_lock(&ctx->mutex);
+ ret = rdma_listen(ctx->cm_id, ctx->backlog);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1120,13 +1142,17 @@ static ssize_t ucma_accept(struct ucma_f
+ if (cmd.conn_param.valid) {
+ ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ mutex_lock(&file->mut);
++ mutex_lock(&ctx->mutex);
+ ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
++ mutex_unlock(&ctx->mutex);
+ if (!ret)
+ ctx->uid = cmd.uid;
+ mutex_unlock(&file->mut);
+- } else
++ } else {
++ mutex_lock(&ctx->mutex);
+ ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+-
++ mutex_unlock(&ctx->mutex);
++ }
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1145,7 +1171,9 @@ static ssize_t ucma_reject(struct ucma_f
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1164,7 +1192,9 @@ static ssize_t ucma_disconnect(struct uc
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_disconnect(ctx->cm_id);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1195,7 +1225,9 @@ static ssize_t ucma_init_qp_attr(struct
+ resp.qp_attr_mask = 0;
+ memset(&qp_attr, 0, sizeof qp_attr);
+ qp_attr.qp_state = cmd.qp_state;
++ mutex_lock(&ctx->mutex);
+ ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
++ mutex_unlock(&ctx->mutex);
+ if (ret)
+ goto out;
+
+@@ -1274,9 +1306,13 @@ static int ucma_set_ib_path(struct ucma_
+ struct sa_path_rec opa;
+
+ sa_convert_path_ib_to_opa(&opa, &sa_path);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
++ mutex_unlock(&ctx->mutex);
+ } else {
++ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
++ mutex_unlock(&ctx->mutex);
+ }
+ if (ret)
+ return ret;
+@@ -1309,7 +1345,9 @@ static int ucma_set_option_level(struct
+
+ switch (level) {
+ case RDMA_OPTION_ID:
++ mutex_lock(&ctx->mutex);
+ ret = ucma_set_option_id(ctx, optname, optval, optlen);
++ mutex_unlock(&ctx->mutex);
+ break;
+ case RDMA_OPTION_IB:
+ ret = ucma_set_option_ib(ctx, optname, optval, optlen);
+@@ -1369,8 +1407,10 @@ static ssize_t ucma_notify(struct ucma_f
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
++ mutex_unlock(&ctx->mutex);
+
+ ucma_put_ctx(ctx);
+ return ret;
+@@ -1413,8 +1453,10 @@ static ssize_t ucma_process_join(struct
+ mc->join_state = join_state;
+ mc->uid = cmd->uid;
+ memcpy(&mc->addr, addr, cmd->addr_size);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+ join_state, mc);
++ mutex_unlock(&ctx->mutex);
+ if (ret)
+ goto err2;
+
+@@ -1518,7 +1560,10 @@ static ssize_t ucma_leave_multicast(stru
+ goto out;
+ }
+
++ mutex_lock(&mc->ctx->mutex);
+ rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
++ mutex_unlock(&mc->ctx->mutex);
++
+ mutex_lock(&mc->ctx->file->mut);
+ ucma_cleanup_mc_events(mc);
+ list_del(&mc->list);