]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.0-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Mar 2019 20:36:16 +0000 (05:36 +0900)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Mar 2019 20:36:16 +0000 (05:36 +0900)
added patches:
aio-simplify-and-fix-fget-fput-for-io_submit.patch
bluetooth-fix-decrementing-reference-count-twice-in-releasing-socket.patch
bluetooth-hci_ldisc-initialize-hci_dev-before-open.patch
bluetooth-hci_ldisc-postpone-hci_uart_proto_ready-bit-set-in-hci_uart_set_proto.patch
bluetooth-hci_uart-check-if-socket-buffer-is-err_ptr-in-h4_recv_buf.patch
drm-vkms-fix-flush_work-without-init_work.patch
f2fs-fix-to-avoid-deadlock-of-atomic-file-operations.patch
locking-lockdep-add-debug_locks-check-in-__lock_downgrade.patch
loop-access-lo_backing_file-only-when-the-loop-device-is-lo_bound.patch
media-v4l2-ctrls.c-uvc-zero-v4l2_event.patch
mm-mempolicy-fix-uninit-memory-access.patch
netfilter-ebtables-remove-bugprint-messages.patch
rdma-cma-rollback-source-ip-address-if-failing-to-acquire-device.patch
x86-unwind-add-hardcoded-orc-entry-for-null.patch
x86-unwind-handle-null-pointer-calls-better-in-frame-unwinder.patch

16 files changed:
queue-5.0/aio-simplify-and-fix-fget-fput-for-io_submit.patch [new file with mode: 0644]
queue-5.0/bluetooth-fix-decrementing-reference-count-twice-in-releasing-socket.patch [new file with mode: 0644]
queue-5.0/bluetooth-hci_ldisc-initialize-hci_dev-before-open.patch [new file with mode: 0644]
queue-5.0/bluetooth-hci_ldisc-postpone-hci_uart_proto_ready-bit-set-in-hci_uart_set_proto.patch [new file with mode: 0644]
queue-5.0/bluetooth-hci_uart-check-if-socket-buffer-is-err_ptr-in-h4_recv_buf.patch [new file with mode: 0644]
queue-5.0/drm-vkms-fix-flush_work-without-init_work.patch [new file with mode: 0644]
queue-5.0/f2fs-fix-to-avoid-deadlock-of-atomic-file-operations.patch [new file with mode: 0644]
queue-5.0/locking-lockdep-add-debug_locks-check-in-__lock_downgrade.patch [new file with mode: 0644]
queue-5.0/loop-access-lo_backing_file-only-when-the-loop-device-is-lo_bound.patch [new file with mode: 0644]
queue-5.0/media-v4l2-ctrls.c-uvc-zero-v4l2_event.patch [new file with mode: 0644]
queue-5.0/mm-mempolicy-fix-uninit-memory-access.patch [new file with mode: 0644]
queue-5.0/netfilter-ebtables-remove-bugprint-messages.patch [new file with mode: 0644]
queue-5.0/rdma-cma-rollback-source-ip-address-if-failing-to-acquire-device.patch [new file with mode: 0644]
queue-5.0/series
queue-5.0/x86-unwind-add-hardcoded-orc-entry-for-null.patch [new file with mode: 0644]
queue-5.0/x86-unwind-handle-null-pointer-calls-better-in-frame-unwinder.patch [new file with mode: 0644]

diff --git a/queue-5.0/aio-simplify-and-fix-fget-fput-for-io_submit.patch b/queue-5.0/aio-simplify-and-fix-fget-fput-for-io_submit.patch
new file mode 100644 (file)
index 0000000..497336d
--- /dev/null
@@ -0,0 +1,309 @@
+From 84c4e1f89fefe70554da0ab33be72c9be7994379 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 3 Mar 2019 14:23:33 -0800
+Subject: aio: simplify - and fix - fget/fput for io_submit()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 84c4e1f89fefe70554da0ab33be72c9be7994379 upstream.
+
+Al Viro root-caused a race where the IOCB_CMD_POLL handling of
+fget/fput() could cause us to access the file pointer after it had
+already been freed:
+
+ "In more details - normally IOCB_CMD_POLL handling looks so:
+
+   1) io_submit(2) allocates aio_kiocb instance and passes it to
+      aio_poll()
+
+   2) aio_poll() resolves the descriptor to struct file by req->file =
+      fget(iocb->aio_fildes)
+
+   3) aio_poll() sets ->woken to false and raises ->ki_refcnt of that
+      aio_kiocb to 2 (bumps by 1, that is).
+
+   4) aio_poll() calls vfs_poll(). After sanity checks (basically,
+      "poll_wait() had been called and only once") it locks the queue.
+      That's what the extra reference to iocb had been for - we know we
+      can safely access it.
+
+   5) With queue locked, we check if ->woken has already been set to
+      true (by aio_poll_wake()) and, if it had been, we unlock the
+      queue, drop a reference to aio_kiocb and bugger off - at that
+      point it's a responsibility to aio_poll_wake() and the stuff
+      called/scheduled by it. That code will drop the reference to file
+      in req->file, along with the other reference to our aio_kiocb.
+
+   6) otherwise, we see whether we need to wait. If we do, we unlock the
+      queue, drop one reference to aio_kiocb and go away - eventual
+      wakeup (or cancel) will deal with the reference to file and with
+      the other reference to aio_kiocb
+
+   7) otherwise we remove ourselves from waitqueue (still under the
+      queue lock), so that wakeup won't get us. No async activity will
+      be happening, so we can safely drop req->file and iocb ourselves.
+
+  If wakeup happens while we are in vfs_poll(), we are fine - aio_kiocb
+  won't get freed under us, so we can do all the checks and locking
+  safely. And we don't touch ->file if we detect that case.
+
+  However, vfs_poll() most certainly *does* touch the file it had been
+  given. So wakeup coming while we are still in ->poll() might end up
+  doing fput() on that file. That case is not too rare, and usually we
+  are saved by the still present reference from descriptor table - that
+  fput() is not the final one.
+
+  But if another thread closes that descriptor right after our fget()
+  and wakeup does happen before ->poll() returns, we are in trouble -
+  final fput() done while we are in the middle of a method:
+
+Al also wrote a patch to take an extra reference to the file descriptor
+to fix this, but I instead suggested we just streamline the whole file
+pointer handling by submit_io() so that the generic aio submission code
+simply keeps the file pointer around until the aio has completed.
+
+Fixes: bfe4037e722e ("aio: implement IOCB_CMD_POLL")
+Acked-by: Al Viro <viro@zeniv.linux.org.uk>
+Reported-by: syzbot+503d4cc169fcec1cb18c@syzkaller.appspotmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c           |   72 +++++++++++++++++++++--------------------------------
+ include/linux/fs.h |    8 +++++
+ 2 files changed, 36 insertions(+), 44 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -167,9 +167,13 @@ struct kioctx {
+       unsigned                id;
+ };
++/*
++ * First field must be the file pointer in all the
++ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
++ */
+ struct fsync_iocb {
+-      struct work_struct      work;
+       struct file             *file;
++      struct work_struct      work;
+       bool                    datasync;
+ };
+@@ -183,8 +187,15 @@ struct poll_iocb {
+       struct work_struct      work;
+ };
++/*
++ * NOTE! Each of the iocb union members has the file pointer
++ * as the first entry in their struct definition. So you can
++ * access the file pointer through any of the sub-structs,
++ * or directly as just 'ki_filp' in this struct.
++ */
+ struct aio_kiocb {
+       union {
++              struct file             *ki_filp;
+               struct kiocb            rw;
+               struct fsync_iocb       fsync;
+               struct poll_iocb        poll;
+@@ -1060,6 +1071,8 @@ static inline void iocb_put(struct aio_k
+ {
+       if (refcount_read(&iocb->ki_refcnt) == 0 ||
+           refcount_dec_and_test(&iocb->ki_refcnt)) {
++              if (iocb->ki_filp)
++                      fput(iocb->ki_filp);
+               percpu_ref_put(&iocb->ki_ctx->reqs);
+               kmem_cache_free(kiocb_cachep, iocb);
+       }
+@@ -1424,7 +1437,6 @@ static void aio_complete_rw(struct kiocb
+               file_end_write(kiocb->ki_filp);
+       }
+-      fput(kiocb->ki_filp);
+       aio_complete(iocb, res, res2);
+ }
+@@ -1432,9 +1444,6 @@ static int aio_prep_rw(struct kiocb *req
+ {
+       int ret;
+-      req->ki_filp = fget(iocb->aio_fildes);
+-      if (unlikely(!req->ki_filp))
+-              return -EBADF;
+       req->ki_complete = aio_complete_rw;
+       req->private = NULL;
+       req->ki_pos = iocb->aio_offset;
+@@ -1451,7 +1460,7 @@ static int aio_prep_rw(struct kiocb *req
+               ret = ioprio_check_cap(iocb->aio_reqprio);
+               if (ret) {
+                       pr_debug("aio ioprio check cap error: %d\n", ret);
+-                      goto out_fput;
++                      return ret;
+               }
+               req->ki_ioprio = iocb->aio_reqprio;
+@@ -1460,14 +1469,10 @@ static int aio_prep_rw(struct kiocb *req
+       ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
+       if (unlikely(ret))
+-              goto out_fput;
++              return ret;
+       req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
+       return 0;
+-
+-out_fput:
+-      fput(req->ki_filp);
+-      return ret;
+ }
+ static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
+@@ -1521,24 +1526,19 @@ static ssize_t aio_read(struct kiocb *re
+       if (ret)
+               return ret;
+       file = req->ki_filp;
+-
+-      ret = -EBADF;
+       if (unlikely(!(file->f_mode & FMODE_READ)))
+-              goto out_fput;
++              return -EBADF;
+       ret = -EINVAL;
+       if (unlikely(!file->f_op->read_iter))
+-              goto out_fput;
++              return -EINVAL;
+       ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
+       if (ret)
+-              goto out_fput;
++              return ret;
+       ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
+       if (!ret)
+               aio_rw_done(req, call_read_iter(file, req, &iter));
+       kfree(iovec);
+-out_fput:
+-      if (unlikely(ret))
+-              fput(file);
+       return ret;
+ }
+@@ -1555,16 +1555,14 @@ static ssize_t aio_write(struct kiocb *r
+               return ret;
+       file = req->ki_filp;
+-      ret = -EBADF;
+       if (unlikely(!(file->f_mode & FMODE_WRITE)))
+-              goto out_fput;
+-      ret = -EINVAL;
++              return -EBADF;
+       if (unlikely(!file->f_op->write_iter))
+-              goto out_fput;
++              return -EINVAL;
+       ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
+       if (ret)
+-              goto out_fput;
++              return ret;
+       ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
+       if (!ret) {
+               /*
+@@ -1582,9 +1580,6 @@ static ssize_t aio_write(struct kiocb *r
+               aio_rw_done(req, call_write_iter(file, req, &iter));
+       }
+       kfree(iovec);
+-out_fput:
+-      if (unlikely(ret))
+-              fput(file);
+       return ret;
+ }
+@@ -1594,7 +1589,6 @@ static void aio_fsync_work(struct work_s
+       int ret;
+       ret = vfs_fsync(req->file, req->datasync);
+-      fput(req->file);
+       aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+ }
+@@ -1605,13 +1599,8 @@ static int aio_fsync(struct fsync_iocb *
+                       iocb->aio_rw_flags))
+               return -EINVAL;
+-      req->file = fget(iocb->aio_fildes);
+-      if (unlikely(!req->file))
+-              return -EBADF;
+-      if (unlikely(!req->file->f_op->fsync)) {
+-              fput(req->file);
++      if (unlikely(!req->file->f_op->fsync))
+               return -EINVAL;
+-      }
+       req->datasync = datasync;
+       INIT_WORK(&req->work, aio_fsync_work);
+@@ -1621,10 +1610,7 @@ static int aio_fsync(struct fsync_iocb *
+ static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
+ {
+-      struct file *file = iocb->poll.file;
+-
+       aio_complete(iocb, mangle_poll(mask), 0);
+-      fput(file);
+ }
+ static void aio_poll_complete_work(struct work_struct *work)
+@@ -1749,9 +1735,6 @@ static ssize_t aio_poll(struct aio_kiocb
+       INIT_WORK(&req->work, aio_poll_complete_work);
+       req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
+-      req->file = fget(iocb->aio_fildes);
+-      if (unlikely(!req->file))
+-              return -EBADF;
+       req->head = NULL;
+       req->woken = false;
+@@ -1794,10 +1777,8 @@ static ssize_t aio_poll(struct aio_kiocb
+       spin_unlock_irq(&ctx->ctx_lock);
+ out:
+-      if (unlikely(apt.error)) {
+-              fput(req->file);
++      if (unlikely(apt.error))
+               return apt.error;
+-      }
+       if (mask)
+               aio_poll_complete(aiocb, mask);
+@@ -1835,6 +1816,11 @@ static int __io_submit_one(struct kioctx
+       if (unlikely(!req))
+               goto out_put_reqs_available;
++      req->ki_filp = fget(iocb->aio_fildes);
++      ret = -EBADF;
++      if (unlikely(!req->ki_filp))
++              goto out_put_req;
++
+       if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+               /*
+                * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -304,13 +304,19 @@ enum rw_hint {
+ struct kiocb {
+       struct file             *ki_filp;
++
++      /* The 'ki_filp' pointer is shared in a union for aio */
++      randomized_struct_fields_start
++
+       loff_t                  ki_pos;
+       void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+       void                    *private;
+       int                     ki_flags;
+       u16                     ki_hint;
+       u16                     ki_ioprio; /* See linux/ioprio.h */
+-} __randomize_layout;
++
++      randomized_struct_fields_end
++};
+ static inline bool is_sync_kiocb(struct kiocb *kiocb)
+ {
diff --git a/queue-5.0/bluetooth-fix-decrementing-reference-count-twice-in-releasing-socket.patch b/queue-5.0/bluetooth-fix-decrementing-reference-count-twice-in-releasing-socket.patch
new file mode 100644 (file)
index 0000000..2ffaab7
--- /dev/null
@@ -0,0 +1,46 @@
+From e20a2e9c42c9e4002d9e338d74e7819e88d77162 Mon Sep 17 00:00:00 2001
+From: Myungho Jung <mhjungk@gmail.com>
+Date: Sat, 2 Feb 2019 16:56:36 -0800
+Subject: Bluetooth: Fix decrementing reference count twice in releasing socket
+
+From: Myungho Jung <mhjungk@gmail.com>
+
+commit e20a2e9c42c9e4002d9e338d74e7819e88d77162 upstream.
+
+When releasing socket, it is possible to enter hci_sock_release() and
+hci_sock_dev_event(HCI_DEV_UNREG) at the same time in different thread.
+The reference count of hdev should be decremented only once from one of
+them but if storing hdev to local variable in hci_sock_release() before
+detached from socket and setting to NULL in hci_sock_dev_event(),
+hci_dev_put(hdev) is unexpectedly called twice. This is resolved by
+referencing hdev from socket after bt_sock_unlink() in
+hci_sock_release().
+
+Reported-by: syzbot+fdc00003f4efff43bc5b@syzkaller.appspotmail.com
+Signed-off-by: Myungho Jung <mhjungk@gmail.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/hci_sock.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -831,8 +831,6 @@ static int hci_sock_release(struct socke
+       if (!sk)
+               return 0;
+-      hdev = hci_pi(sk)->hdev;
+-
+       switch (hci_pi(sk)->channel) {
+       case HCI_CHANNEL_MONITOR:
+               atomic_dec(&monitor_promisc);
+@@ -854,6 +852,7 @@ static int hci_sock_release(struct socke
+       bt_sock_unlink(&hci_sk_list, sk);
++      hdev = hci_pi(sk)->hdev;
+       if (hdev) {
+               if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+                       /* When releasing a user channel exclusive access,
diff --git a/queue-5.0/bluetooth-hci_ldisc-initialize-hci_dev-before-open.patch b/queue-5.0/bluetooth-hci_ldisc-initialize-hci_dev-before-open.patch
new file mode 100644 (file)
index 0000000..7e55e9f
--- /dev/null
@@ -0,0 +1,95 @@
+From 32a7b4cbe93b0a0ef7e63d31ca69ce54736c4412 Mon Sep 17 00:00:00 2001
+From: Jeremy Cline <jcline@redhat.com>
+Date: Wed, 6 Feb 2019 12:54:16 -0500
+Subject: Bluetooth: hci_ldisc: Initialize hci_dev before open()
+
+From: Jeremy Cline <jcline@redhat.com>
+
+commit 32a7b4cbe93b0a0ef7e63d31ca69ce54736c4412 upstream.
+
+The hci_dev struct hdev is referenced in work queues and timers started
+by open() in some protocols. This creates a race between the
+initialization function and the work or timer which can result hdev
+being dereferenced while it is still null.
+
+The syzbot report contains a reliable reproducer which causes a null
+pointer dereference of hdev in hci_uart_write_work() by making the
+memory allocation for hdev fail.
+
+To fix this, ensure hdev is valid from before calling a protocol's
+open() until after calling a protocol's close().
+
+Reported-by: syzbot+257790c15bcdef6fe00c@syzkaller.appspotmail.com
+Signed-off-by: Jeremy Cline <jcline@redhat.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/hci_ldisc.c |   21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -207,11 +207,11 @@ void hci_uart_init_work(struct work_stru
+       err = hci_register_dev(hu->hdev);
+       if (err < 0) {
+               BT_ERR("Can't register HCI device");
++              clear_bit(HCI_UART_PROTO_READY, &hu->flags);
++              hu->proto->close(hu);
+               hdev = hu->hdev;
+               hu->hdev = NULL;
+               hci_free_dev(hdev);
+-              clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+-              hu->proto->close(hu);
+               return;
+       }
+@@ -616,6 +616,7 @@ static void hci_uart_tty_receive(struct
+ static int hci_uart_register_dev(struct hci_uart *hu)
+ {
+       struct hci_dev *hdev;
++      int err;
+       BT_DBG("");
+@@ -659,11 +660,22 @@ static int hci_uart_register_dev(struct
+       else
+               hdev->dev_type = HCI_PRIMARY;
++      /* Only call open() for the protocol after hdev is fully initialized as
++       * open() (or a timer/workqueue it starts) may attempt to reference it.
++       */
++      err = hu->proto->open(hu);
++      if (err) {
++              hu->hdev = NULL;
++              hci_free_dev(hdev);
++              return err;
++      }
++
+       if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+               return 0;
+       if (hci_register_dev(hdev) < 0) {
+               BT_ERR("Can't register HCI device");
++              hu->proto->close(hu);
+               hu->hdev = NULL;
+               hci_free_dev(hdev);
+               return -ENODEV;
+@@ -683,17 +695,12 @@ static int hci_uart_set_proto(struct hci
+       if (!p)
+               return -EPROTONOSUPPORT;
+-      err = p->open(hu);
+-      if (err)
+-              return err;
+-
+       hu->proto = p;
+       set_bit(HCI_UART_PROTO_READY, &hu->flags);
+       err = hci_uart_register_dev(hu);
+       if (err) {
+               clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+-              p->close(hu);
+               return err;
+       }
diff --git a/queue-5.0/bluetooth-hci_ldisc-postpone-hci_uart_proto_ready-bit-set-in-hci_uart_set_proto.patch b/queue-5.0/bluetooth-hci_ldisc-postpone-hci_uart_proto_ready-bit-set-in-hci_uart_set_proto.patch
new file mode 100644 (file)
index 0000000..817a48d
--- /dev/null
@@ -0,0 +1,57 @@
+From 56897b217a1d0a91c9920cb418d6b3fe922f590a Mon Sep 17 00:00:00 2001
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+Date: Sat, 23 Feb 2019 12:33:27 +0800
+Subject: Bluetooth: hci_ldisc: Postpone HCI_UART_PROTO_READY bit set in hci_uart_set_proto()
+
+From: Kefeng Wang <wangkefeng.wang@huawei.com>
+
+commit 56897b217a1d0a91c9920cb418d6b3fe922f590a upstream.
+
+task A:                                task B:
+hci_uart_set_proto                     flush_to_ldisc
+ - p->open(hu) -> h5_open  //alloc h5  - receive_buf
+ - set_bit HCI_UART_PROTO_READY         - tty_port_default_receive_buf
+ - hci_uart_register_dev                 - tty_ldisc_receive_buf
+                                          - hci_uart_tty_receive
+                                          - test_bit HCI_UART_PROTO_READY
+                                           - h5_recv
+ - clear_bit HCI_UART_PROTO_READY             while() {
+ - p->open(hu) -> h5_close //free h5
+                                             - h5_rx_3wire_hdr
+                                              - h5_reset()  //use-after-free
+                                              }
+
+It could use ioctl to set hci uart proto, but there is
+a use-after-free issue when hci_uart_register_dev() fail in
+hci_uart_set_proto(), see stack above, fix this by setting
+HCI_UART_PROTO_READY bit only when hci_uart_register_dev()
+return success.
+
+Reported-by: syzbot+899a33dc0fa0dbaf06a6@syzkaller.appspotmail.com
+Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
+Reviewed-by: Jeremy Cline <jcline@redhat.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/hci_ldisc.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -696,14 +696,13 @@ static int hci_uart_set_proto(struct hci
+               return -EPROTONOSUPPORT;
+       hu->proto = p;
+-      set_bit(HCI_UART_PROTO_READY, &hu->flags);
+       err = hci_uart_register_dev(hu);
+       if (err) {
+-              clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+               return err;
+       }
++      set_bit(HCI_UART_PROTO_READY, &hu->flags);
+       return 0;
+ }
diff --git a/queue-5.0/bluetooth-hci_uart-check-if-socket-buffer-is-err_ptr-in-h4_recv_buf.patch b/queue-5.0/bluetooth-hci_uart-check-if-socket-buffer-is-err_ptr-in-h4_recv_buf.patch
new file mode 100644 (file)
index 0000000..0f58406
--- /dev/null
@@ -0,0 +1,51 @@
+From 1dc2d785156cbdc80806c32e8d2c7c735d0b4721 Mon Sep 17 00:00:00 2001
+From: Myungho Jung <mhjungk@gmail.com>
+Date: Tue, 22 Jan 2019 00:33:26 -0800
+Subject: Bluetooth: hci_uart: Check if socket buffer is ERR_PTR in h4_recv_buf()
+
+From: Myungho Jung <mhjungk@gmail.com>
+
+commit 1dc2d785156cbdc80806c32e8d2c7c735d0b4721 upstream.
+
+h4_recv_buf() callers store the return value to socket buffer and
+recursively pass the buffer to h4_recv_buf() without protection. So,
+ERR_PTR returned from h4_recv_buf() can be dereferenced, if called again
+before setting the socket buffer to NULL from previous error. Check if
+skb is ERR_PTR in h4_recv_buf().
+
+Reported-by: syzbot+017a32f149406df32703@syzkaller.appspotmail.com
+Signed-off-by: Myungho Jung <mhjungk@gmail.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/h4_recv.h |    4 ++++
+ drivers/bluetooth/hci_h4.c  |    4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/drivers/bluetooth/h4_recv.h
++++ b/drivers/bluetooth/h4_recv.h
+@@ -60,6 +60,10 @@ static inline struct sk_buff *h4_recv_bu
+                                         const struct h4_recv_pkt *pkts,
+                                         int pkts_count)
+ {
++      /* Check for error from previous call */
++      if (IS_ERR(skb))
++              skb = NULL;
++
+       while (count) {
+               int i, len;
+--- a/drivers/bluetooth/hci_h4.c
++++ b/drivers/bluetooth/hci_h4.c
+@@ -174,6 +174,10 @@ struct sk_buff *h4_recv_buf(struct hci_d
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       u8 alignment = hu->alignment ? hu->alignment : 1;
++      /* Check for error from previous call */
++      if (IS_ERR(skb))
++              skb = NULL;
++
+       while (count) {
+               int i, len;
diff --git a/queue-5.0/drm-vkms-fix-flush_work-without-init_work.patch b/queue-5.0/drm-vkms-fix-flush_work-without-init_work.patch
new file mode 100644 (file)
index 0000000..8c9705c
--- /dev/null
@@ -0,0 +1,40 @@
+From b30b61ff6b1dc37f276cf56a8328b80086a3ffca Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Sat, 19 Jan 2019 01:43:43 +0900
+Subject: drm/vkms: Fix flush_work() without INIT_WORK().
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit b30b61ff6b1dc37f276cf56a8328b80086a3ffca upstream.
+
+syzbot is hitting a lockdep warning [1] because flush_work() is called
+without INIT_WORK() after kzalloc() at vkms_atomic_crtc_reset().
+
+Commit 6c234fe37c57627a ("drm/vkms: Implement CRC debugfs API") added
+INIT_WORK() to only vkms_atomic_crtc_duplicate_state() side. Assuming
+that lifecycle of crc_work is appropriately managed, fix this problem
+by adding INIT_WORK() to vkms_atomic_crtc_reset() side.
+
+[1] https://syzkaller.appspot.com/bug?id=a5954455fcfa51c29ca2ab55b203076337e1c770
+
+Reported-and-tested-by: syzbot <syzbot+12f1b031b6da017e34f8@syzkaller.appspotmail.com>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reviewed-by: Shayenne Moura <shayenneluzmoura@gmail.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/1547829823-9877-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vkms/vkms_crtc.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -98,6 +98,7 @@ static void vkms_atomic_crtc_reset(struc
+       vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+       if (!vkms_state)
+               return;
++      INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+       crtc->state = &vkms_state->base;
+       crtc->state->crtc = crtc;
diff --git a/queue-5.0/f2fs-fix-to-avoid-deadlock-of-atomic-file-operations.patch b/queue-5.0/f2fs-fix-to-avoid-deadlock-of-atomic-file-operations.patch
new file mode 100644 (file)
index 0000000..27f8f71
--- /dev/null
@@ -0,0 +1,228 @@
+From 48432984d718c95cf13e26d487c2d1b697c3c01f Mon Sep 17 00:00:00 2001
+From: Chao Yu <yuchao0@huawei.com>
+Date: Mon, 25 Feb 2019 17:11:03 +0800
+Subject: f2fs: fix to avoid deadlock of atomic file operations
+
+From: Chao Yu <yuchao0@huawei.com>
+
+commit 48432984d718c95cf13e26d487c2d1b697c3c01f upstream.
+
+Thread A                               Thread B
+- __fput
+ - f2fs_release_file
+  - drop_inmem_pages
+   - mutex_lock(&fi->inmem_lock)
+   - __revoke_inmem_pages
+    - lock_page(page)
+                                       - open
+                                       - f2fs_setattr
+                                       - truncate_setsize
+                                        - truncate_inode_pages_range
+                                         - lock_page(page)
+                                         - truncate_cleanup_page
+                                          - f2fs_invalidate_page
+                                           - drop_inmem_page
+                                           - mutex_lock(&fi->inmem_lock);
+
+We may encounter above ABBA deadlock as reported by Kyungtae Kim:
+
+I'm reporting a bug in linux-4.17.19: "INFO: task hung in
+drop_inmem_page" (no reproducer)
+
+I think this might be somehow related to the following:
+https://groups.google.com/forum/#!searchin/syzkaller-bugs/INFO$3A$20task$20hung$20in$20%7Csort:date/syzkaller-bugs/c6soBTrdaIo/AjAzPeIzCgAJ
+
+=========================================
+INFO: task syz-executor7:10822 blocked for more than 120 seconds.
+      Not tainted 4.17.19 #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+syz-executor7   D27024 10822   6346 0x00000004
+Call Trace:
+ context_switch kernel/sched/core.c:2867 [inline]
+ __schedule+0x721/0x1e60 kernel/sched/core.c:3515
+ schedule+0x88/0x1c0 kernel/sched/core.c:3559
+ schedule_preempt_disabled+0x18/0x30 kernel/sched/core.c:3617
+ __mutex_lock_common kernel/locking/mutex.c:833 [inline]
+ __mutex_lock+0x5bd/0x1410 kernel/locking/mutex.c:893
+ mutex_lock_nested+0x1b/0x20 kernel/locking/mutex.c:908
+ drop_inmem_page+0xcb/0x810 fs/f2fs/segment.c:327
+ f2fs_invalidate_page+0x337/0x5e0 fs/f2fs/data.c:2401
+ do_invalidatepage mm/truncate.c:165 [inline]
+ truncate_cleanup_page+0x261/0x330 mm/truncate.c:187
+ truncate_inode_pages_range+0x552/0x1610 mm/truncate.c:367
+ truncate_inode_pages mm/truncate.c:478 [inline]
+ truncate_pagecache+0x6d/0x90 mm/truncate.c:801
+ truncate_setsize+0x81/0xa0 mm/truncate.c:826
+ f2fs_setattr+0x44f/0x1270 fs/f2fs/file.c:781
+ notify_change+0xa62/0xe80 fs/attr.c:313
+ do_truncate+0x12e/0x1e0 fs/open.c:63
+ do_last fs/namei.c:2955 [inline]
+ path_openat+0x2042/0x29f0 fs/namei.c:3505
+ do_filp_open+0x1bd/0x2c0 fs/namei.c:3540
+ do_sys_open+0x35e/0x4e0 fs/open.c:1101
+ __do_sys_open fs/open.c:1119 [inline]
+ __se_sys_open fs/open.c:1114 [inline]
+ __x64_sys_open+0x89/0xc0 fs/open.c:1114
+ do_syscall_64+0xc4/0x4e0 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x4497b9
+RSP: 002b:00007f734e459c68 EFLAGS: 00000246 ORIG_RAX: 0000000000000002
+RAX: ffffffffffffffda RBX: 00007f734e45a6cc RCX: 00000000004497b9
+RDX: 0000000000000104 RSI: 00000000000a8280 RDI: 0000000020000080
+RBP: 000000000071bea0 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
+R13: 0000000000007230 R14: 00000000006f02d0 R15: 00007f734e45a700
+INFO: task syz-executor7:10858 blocked for more than 120 seconds.
+      Not tainted 4.17.19 #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+syz-executor7   D28880 10858   6346 0x00000004
+Call Trace:
+ context_switch kernel/sched/core.c:2867 [inline]
+ __schedule+0x721/0x1e60 kernel/sched/core.c:3515
+ schedule+0x88/0x1c0 kernel/sched/core.c:3559
+ __rwsem_down_write_failed_common kernel/locking/rwsem-xadd.c:565 [inline]
+ rwsem_down_write_failed+0x5e6/0xc90 kernel/locking/rwsem-xadd.c:594
+ call_rwsem_down_write_failed+0x17/0x30 arch/x86/lib/rwsem.S:117
+ __down_write arch/x86/include/asm/rwsem.h:142 [inline]
+ down_write+0x58/0xa0 kernel/locking/rwsem.c:72
+ inode_lock include/linux/fs.h:713 [inline]
+ do_truncate+0x120/0x1e0 fs/open.c:61
+ do_last fs/namei.c:2955 [inline]
+ path_openat+0x2042/0x29f0 fs/namei.c:3505
+ do_filp_open+0x1bd/0x2c0 fs/namei.c:3540
+ do_sys_open+0x35e/0x4e0 fs/open.c:1101
+ __do_sys_open fs/open.c:1119 [inline]
+ __se_sys_open fs/open.c:1114 [inline]
+ __x64_sys_open+0x89/0xc0 fs/open.c:1114
+ do_syscall_64+0xc4/0x4e0 arch/x86/entry/common.c:287
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x4497b9
+RSP: 002b:00007f734e3b4c68 EFLAGS: 00000246 ORIG_RAX: 0000000000000002
+RAX: ffffffffffffffda RBX: 00007f734e3b56cc RCX: 00000000004497b9
+RDX: 0000000000000104 RSI: 00000000000a8280 RDI: 0000000020000080
+RBP: 000000000071c238 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
+R13: 0000000000007230 R14: 00000000006f02d0 R15: 00007f734e3b5700
+INFO: task syz-executor5:10829 blocked for more than 120 seconds.
+      Not tainted 4.17.19 #1
+"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+syz-executor5   D28760 10829   6308 0x80000002
+Call Trace:
+ context_switch kernel/sched/core.c:2867 [inline]
+ __schedule+0x721/0x1e60 kernel/sched/core.c:3515
+ schedule+0x88/0x1c0 kernel/sched/core.c:3559
+ io_schedule+0x21/0x80 kernel/sched/core.c:5179
+ wait_on_page_bit_common mm/filemap.c:1100 [inline]
+ __lock_page+0x2b5/0x390 mm/filemap.c:1273
+ lock_page include/linux/pagemap.h:483 [inline]
+ __revoke_inmem_pages+0xb35/0x11c0 fs/f2fs/segment.c:231
+ drop_inmem_pages+0xa3/0x3e0 fs/f2fs/segment.c:306
+ f2fs_release_file+0x2c7/0x330 fs/f2fs/file.c:1556
+ __fput+0x2c7/0x780 fs/file_table.c:209
+ ____fput+0x1a/0x20 fs/file_table.c:243
+ task_work_run+0x151/0x1d0 kernel/task_work.c:113
+ exit_task_work include/linux/task_work.h:22 [inline]
+ do_exit+0x8ba/0x30a0 kernel/exit.c:865
+ do_group_exit+0x13b/0x3a0 kernel/exit.c:968
+ get_signal+0x6bb/0x1650 kernel/signal.c:2482
+ do_signal+0x84/0x1b70 arch/x86/kernel/signal.c:810
+ exit_to_usermode_loop+0x155/0x190 arch/x86/entry/common.c:162
+ prepare_exit_to_usermode arch/x86/entry/common.c:196 [inline]
+ syscall_return_slowpath arch/x86/entry/common.c:265 [inline]
+ do_syscall_64+0x445/0x4e0 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x4497b9
+RSP: 002b:00007f1c68e74ce8 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
+RAX: fffffffffffffe00 RBX: 000000000071bf80 RCX: 00000000004497b9
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: 000000000071bf80
+RBP: 000000000071bf80 R08: 0000000000000000 R09: 000000000071bf58
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 0000000000000000 R14: 00007f1c68e759c0 R15: 00007f1c68e75700
+
+This patch tries to use trylock_page to mitigate such a deadlock
+condition as a fix.
+
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/f2fs/segment.c |   43 +++++++++++++++++++++++++++++++------------
+ 1 file changed, 31 insertions(+), 12 deletions(-)
+
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -215,7 +215,8 @@ void f2fs_register_inmem_page(struct ino
+ }
+ static int __revoke_inmem_pages(struct inode *inode,
+-                              struct list_head *head, bool drop, bool recover)
++                              struct list_head *head, bool drop, bool recover,
++                              bool trylock)
+ {
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct inmem_pages *cur, *tmp;
+@@ -227,7 +228,16 @@ static int __revoke_inmem_pages(struct i
+               if (drop)
+                       trace_f2fs_commit_inmem_page(page, INMEM_DROP);
+-              lock_page(page);
++              if (trylock) {
++                      /*
++                       * to avoid deadlock in between page lock and
++                       * inmem_lock.
++                       */
++                      if (!trylock_page(page))
++                              continue;
++              } else {
++                      lock_page(page);
++              }
+               f2fs_wait_on_page_writeback(page, DATA, true, true);
+@@ -318,13 +328,19 @@ void f2fs_drop_inmem_pages(struct inode
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+-      mutex_lock(&fi->inmem_lock);
+-      __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+-      spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+-      if (!list_empty(&fi->inmem_ilist))
+-              list_del_init(&fi->inmem_ilist);
+-      spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+-      mutex_unlock(&fi->inmem_lock);
++      while (!list_empty(&fi->inmem_pages)) {
++              mutex_lock(&fi->inmem_lock);
++              __revoke_inmem_pages(inode, &fi->inmem_pages,
++                                              true, false, true);
++
++              if (list_empty(&fi->inmem_pages)) {
++                      spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
++                      if (!list_empty(&fi->inmem_ilist))
++                              list_del_init(&fi->inmem_ilist);
++                      spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
++              }
++              mutex_unlock(&fi->inmem_lock);
++      }
+       clear_inode_flag(inode, FI_ATOMIC_FILE);
+       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
+@@ -429,12 +445,15 @@ retry:
+                * recovery or rewrite & commit last transaction. For other
+                * error number, revoking was done by filesystem itself.
+                */
+-              err = __revoke_inmem_pages(inode, &revoke_list, false, true);
++              err = __revoke_inmem_pages(inode, &revoke_list,
++                                              false, true, false);
+               /* drop all uncommitted pages */
+-              __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
++              __revoke_inmem_pages(inode, &fi->inmem_pages,
++                                              true, false, false);
+       } else {
+-              __revoke_inmem_pages(inode, &revoke_list, false, false);
++              __revoke_inmem_pages(inode, &revoke_list,
++                                              false, false, false);
+       }
+       return err;
diff --git a/queue-5.0/locking-lockdep-add-debug_locks-check-in-__lock_downgrade.patch b/queue-5.0/locking-lockdep-add-debug_locks-check-in-__lock_downgrade.patch
new file mode 100644 (file)
index 0000000..b1a0dd3
--- /dev/null
@@ -0,0 +1,48 @@
+From 71492580571467fb7177aade19c18ce7486267f5 Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Wed, 9 Jan 2019 23:03:25 -0500
+Subject: locking/lockdep: Add debug_locks check in __lock_downgrade()
+
+From: Waiman Long <longman@redhat.com>
+
+commit 71492580571467fb7177aade19c18ce7486267f5 upstream.
+
+Tetsuo Handa had reported he saw an incorrect "downgrading a read lock"
+warning right after a previous lockdep warning. It is likely that the
+previous warning turned off lock debugging, causing lockdep to have an
+inconsistent state leading to the lock downgrade warning.
+
+Fix that by adding a check for debug_locks at the beginning of
+__lock_downgrade().
+
+Debugged-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Reported-by: syzbot+53383ae265fb161ef488@syzkaller.appspotmail.com
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Link: https://lkml.kernel.org/r/1547093005-26085-1-git-send-email-longman@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/lockdep.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3535,6 +3535,9 @@ static int __lock_downgrade(struct lockd
+       unsigned int depth;
+       int i;
++      if (unlikely(!debug_locks))
++              return 0;
++
+       depth = curr->lockdep_depth;
+       /*
+        * This function is about (re)setting the class of a held lock,
diff --git a/queue-5.0/loop-access-lo_backing_file-only-when-the-loop-device-is-lo_bound.patch b/queue-5.0/loop-access-lo_backing_file-only-when-the-loop-device-is-lo_bound.patch
new file mode 100644 (file)
index 0000000..70aa4da
--- /dev/null
@@ -0,0 +1,63 @@
+From f7c8a4120eedf24c36090b7542b179ff7a649219 Mon Sep 17 00:00:00 2001
+From: Dongli Zhang <dongli.zhang@oracle.com>
+Date: Mon, 18 Mar 2019 20:23:17 +0800
+Subject: loop: access lo_backing_file only when the loop device is Lo_bound
+
+From: Dongli Zhang <dongli.zhang@oracle.com>
+
+commit f7c8a4120eedf24c36090b7542b179ff7a649219 upstream.
+
+Commit 758a58d0bc67 ("loop: set GENHD_FL_NO_PART_SCAN after
+blkdev_reread_part()") separates "lo->lo_backing_file = NULL" and
+"lo->lo_state = Lo_unbound" into different critical regions protected by
+loop_ctl_mutex.
+
+However, there is below race that the NULL lo->lo_backing_file would be
+accessed when the backend of a loop is another loop device, e.g., loop0's
+backend is a file, while loop1's backend is loop0.
+
+loop0's backend is file            loop1's backend is loop0
+
+__loop_clr_fd()
+  mutex_lock(&loop_ctl_mutex);
+  lo->lo_backing_file = NULL; --> set to NULL
+  mutex_unlock(&loop_ctl_mutex);
+                                   loop_set_fd()
+                                     mutex_lock_killable(&loop_ctl_mutex);
+                                     loop_validate_file()
+                                       f = l->lo_backing_file; --> NULL
+                                         access if loop0 is not Lo_unbound
+  mutex_lock(&loop_ctl_mutex);
+  lo->lo_state = Lo_unbound;
+  mutex_unlock(&loop_ctl_mutex);
+
+lo->lo_backing_file should be accessed only when the loop device is
+Lo_bound.
+
+In fact, the problem has been introduced already in commit 7ccd0791d985
+("loop: Push loop_ctl_mutex down into loop_clr_fd()"), after which
+loop_validate_file() could see devices in Lo_rundown state, which it
+did not account for. It was harmless at that point, but still.
+
+Fixes: 7ccd0791d985 ("loop: Push loop_ctl_mutex down into loop_clr_fd()")
+Reported-by: syzbot+9bdc1adc1c55e7fe765b@syzkaller.appspotmail.com
+Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/loop.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -656,7 +656,7 @@ static int loop_validate_file(struct fil
+                       return -EBADF;
+               l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+-              if (l->lo_state == Lo_unbound) {
++              if (l->lo_state != Lo_bound) {
+                       return -EINVAL;
+               }
+               f = l->lo_backing_file;
diff --git a/queue-5.0/media-v4l2-ctrls.c-uvc-zero-v4l2_event.patch b/queue-5.0/media-v4l2-ctrls.c-uvc-zero-v4l2_event.patch
new file mode 100644 (file)
index 0000000..51a9a64
--- /dev/null
@@ -0,0 +1,49 @@
+From f45f3f753b0a3d739acda8e311b4f744d82dc52a Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil@xs4all.nl>
+Date: Tue, 18 Dec 2018 08:37:08 -0500
+Subject: media: v4l2-ctrls.c/uvc: zero v4l2_event
+
+From: Hans Verkuil <hverkuil@xs4all.nl>
+
+commit f45f3f753b0a3d739acda8e311b4f744d82dc52a upstream.
+
+Control events can leak kernel memory since they do not fully zero the
+event. The same code is present in both v4l2-ctrls.c and uvc_ctrl.c, so
+fix both.
+
+It appears that all other event code is properly zeroing the structure;
+only these two places were missed.
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Reported-by: syzbot+4f021cf3697781dbd9fb@syzkaller.appspotmail.com
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/uvc/uvc_ctrl.c     |    2 +-
+ drivers/media/v4l2-core/v4l2-ctrls.c |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1212,7 +1212,7 @@ static void uvc_ctrl_fill_event(struct u
+       __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
+-      memset(ev->reserved, 0, sizeof(ev->reserved));
++      memset(ev, 0, sizeof(*ev));
+       ev->type = V4L2_EVENT_CTRL;
+       ev->id = v4l2_ctrl.id;
+       ev->u.ctrl.value = value;
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -1387,7 +1387,7 @@ static u32 user_flags(const struct v4l2_
+ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
+ {
+-      memset(ev->reserved, 0, sizeof(ev->reserved));
++      memset(ev, 0, sizeof(*ev));
+       ev->type = V4L2_EVENT_CTRL;
+       ev->id = ctrl->id;
+       ev->u.ctrl.changes = changes;
diff --git a/queue-5.0/mm-mempolicy-fix-uninit-memory-access.patch b/queue-5.0/mm-mempolicy-fix-uninit-memory-access.patch
new file mode 100644 (file)
index 0000000..3e18f81
--- /dev/null
@@ -0,0 +1,93 @@
+From 2e25644e8da4ed3a27e7b8315aaae74660be72dc Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Tue, 5 Mar 2019 15:46:50 -0800
+Subject: mm, mempolicy: fix uninit memory access
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 2e25644e8da4ed3a27e7b8315aaae74660be72dc upstream.
+
+Syzbot with KMSAN reports (excerpt):
+
+==================================================================
+BUG: KMSAN: uninit-value in mpol_rebind_policy mm/mempolicy.c:353 [inline]
+BUG: KMSAN: uninit-value in mpol_rebind_mm+0x249/0x370 mm/mempolicy.c:384
+CPU: 1 PID: 17420 Comm: syz-executor4 Not tainted 4.20.0-rc7+ #15
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+Google 01/01/2011
+Call Trace:
+  __dump_stack lib/dump_stack.c:77 [inline]
+  dump_stack+0x173/0x1d0 lib/dump_stack.c:113
+  kmsan_report+0x12e/0x2a0 mm/kmsan/kmsan.c:613
+  __msan_warning+0x82/0xf0 mm/kmsan/kmsan_instr.c:295
+  mpol_rebind_policy mm/mempolicy.c:353 [inline]
+  mpol_rebind_mm+0x249/0x370 mm/mempolicy.c:384
+  update_tasks_nodemask+0x608/0xca0 kernel/cgroup/cpuset.c:1120
+  update_nodemasks_hier kernel/cgroup/cpuset.c:1185 [inline]
+  update_nodemask kernel/cgroup/cpuset.c:1253 [inline]
+  cpuset_write_resmask+0x2a98/0x34b0 kernel/cgroup/cpuset.c:1728
+
+...
+
+Uninit was created at:
+  kmsan_save_stack_with_flags mm/kmsan/kmsan.c:204 [inline]
+  kmsan_internal_poison_shadow+0x92/0x150 mm/kmsan/kmsan.c:158
+  kmsan_kmalloc+0xa6/0x130 mm/kmsan/kmsan_hooks.c:176
+  kmem_cache_alloc+0x572/0xb90 mm/slub.c:2777
+  mpol_new mm/mempolicy.c:276 [inline]
+  do_mbind mm/mempolicy.c:1180 [inline]
+  kernel_mbind+0x8a7/0x31a0 mm/mempolicy.c:1347
+  __do_sys_mbind mm/mempolicy.c:1354 [inline]
+
+As it's difficult to report where exactly the uninit value resides in
+the mempolicy object, we have to guess a bit.  mm/mempolicy.c:353
+contains this part of mpol_rebind_policy():
+
+        if (!mpol_store_user_nodemask(pol) &&
+            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
+
+"mpol_store_user_nodemask(pol)" is testing pol->flags, which I couldn't
+ever see being uninitialized after leaving mpol_new().  So I'll guess
+it's actually about accessing pol->w.cpuset_mems_allowed on line 354,
+but still part of statement starting on line 353.
+
+For w.cpuset_mems_allowed to be not initialized, and the nodes_equal()
+reachable for a mempolicy where mpol_set_nodemask() is called in
+do_mbind(), it seems the only possibility is a MPOL_PREFERRED policy
+with empty set of nodes, i.e.  MPOL_LOCAL equivalent, with MPOL_F_LOCAL
+flag.  Let's exclude such policies from the nodes_equal() check.  Note
+the uninit access should be benign anyway, as rebinding this kind of
+policy is always a no-op.  Therefore no actual need for stable
+inclusion.
+
+Link: http://lkml.kernel.org/r/a71997c3-e8ae-a787-d5ce-3db05768b27c@suse.cz
+Link: http://lkml.kernel.org/r/73da3e9c-cc84-509e-17d9-0c434bb9967d@suse.cz
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: syzbot+b19c2dc2c990ea657a71@syzkaller.appspotmail.com
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Yisheng Xie <xieyisheng1@huawei.com>
+Cc: zhong jiang <zhongjiang@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mempolicy.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -350,7 +350,7 @@ static void mpol_rebind_policy(struct me
+ {
+       if (!pol)
+               return;
+-      if (!mpol_store_user_nodemask(pol) &&
++      if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
+           nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
+               return;
diff --git a/queue-5.0/netfilter-ebtables-remove-bugprint-messages.patch b/queue-5.0/netfilter-ebtables-remove-bugprint-messages.patch
new file mode 100644 (file)
index 0000000..86ae007
--- /dev/null
@@ -0,0 +1,361 @@
+From d824548dae220820bdf69b2d1561b7c4b072783f Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Tue, 19 Feb 2019 00:37:21 +0100
+Subject: netfilter: ebtables: remove BUGPRINT messages
+
+From: Florian Westphal <fw@strlen.de>
+
+commit d824548dae220820bdf69b2d1561b7c4b072783f upstream.
+
+They are however frequently triggered by syzkaller, so remove them.
+
+ebtables userspace should never trigger any of these, so there is little
+value in making them pr_debug (or ratelimited).
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bridge/netfilter/ebtables.c |  131 +++++++++++-----------------------------
+ 1 file changed, 39 insertions(+), 92 deletions(-)
+
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -31,10 +31,6 @@
+ /* needed for logical [in,out]-dev filtering */
+ #include "../br_private.h"
+-#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
+-                                       "report to author: "format, ## args)
+-/* #define BUGPRINT(format, args...) */
+-
+ /* Each cpu has its own set of counters, so there is no need for write_lock in
+  * the softirq
+  * For reading or updating the counters, the user context needs to
+@@ -466,8 +462,6 @@ static int ebt_verify_pointers(const str
+                               /* we make userspace set this right,
+                                * so there is no misunderstanding
+                                */
+-                              BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
+-                                       "in distinguisher\n");
+                               return -EINVAL;
+                       }
+                       if (i != NF_BR_NUMHOOKS)
+@@ -485,18 +479,14 @@ static int ebt_verify_pointers(const str
+                       offset += e->next_offset;
+               }
+       }
+-      if (offset != limit) {
+-              BUGPRINT("entries_size too small\n");
++      if (offset != limit)
+               return -EINVAL;
+-      }
+       /* check if all valid hooks have a chain */
+       for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+               if (!newinfo->hook_entry[i] &&
+-                 (valid_hooks & (1 << i))) {
+-                      BUGPRINT("Valid hook without chain\n");
++                 (valid_hooks & (1 << i)))
+                       return -EINVAL;
+-              }
+       }
+       return 0;
+ }
+@@ -523,26 +513,20 @@ ebt_check_entry_size_and_hooks(const str
+               /* this checks if the previous chain has as many entries
+                * as it said it has
+                */
+-              if (*n != *cnt) {
+-                      BUGPRINT("nentries does not equal the nr of entries "
+-                               "in the chain\n");
++              if (*n != *cnt)
+                       return -EINVAL;
+-              }
++
+               if (((struct ebt_entries *)e)->policy != EBT_DROP &&
+                  ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
+                       /* only RETURN from udc */
+                       if (i != NF_BR_NUMHOOKS ||
+-                         ((struct ebt_entries *)e)->policy != EBT_RETURN) {
+-                              BUGPRINT("bad policy\n");
++                         ((struct ebt_entries *)e)->policy != EBT_RETURN)
+                               return -EINVAL;
+-                      }
+               }
+               if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
+                       (*udc_cnt)++;
+-              if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
+-                      BUGPRINT("counter_offset != totalcnt");
++              if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
+                       return -EINVAL;
+-              }
+               *n = ((struct ebt_entries *)e)->nentries;
+               *cnt = 0;
+               return 0;
+@@ -550,15 +534,13 @@ ebt_check_entry_size_and_hooks(const str
+       /* a plain old entry, heh */
+       if (sizeof(struct ebt_entry) > e->watchers_offset ||
+          e->watchers_offset > e->target_offset ||
+-         e->target_offset >= e->next_offset) {
+-              BUGPRINT("entry offsets not in right order\n");
++         e->target_offset >= e->next_offset)
+               return -EINVAL;
+-      }
++
+       /* this is not checked anywhere else */
+-      if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
+-              BUGPRINT("target size too small\n");
++      if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
+               return -EINVAL;
+-      }
++
+       (*cnt)++;
+       (*totalcnt)++;
+       return 0;
+@@ -678,18 +660,15 @@ ebt_check_entry(struct ebt_entry *e, str
+       if (e->bitmask == 0)
+               return 0;
+-      if (e->bitmask & ~EBT_F_MASK) {
+-              BUGPRINT("Unknown flag for bitmask\n");
++      if (e->bitmask & ~EBT_F_MASK)
+               return -EINVAL;
+-      }
+-      if (e->invflags & ~EBT_INV_MASK) {
+-              BUGPRINT("Unknown flag for inv bitmask\n");
++
++      if (e->invflags & ~EBT_INV_MASK)
+               return -EINVAL;
+-      }
+-      if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
+-              BUGPRINT("NOPROTO & 802_3 not allowed\n");
++
++      if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
+               return -EINVAL;
+-      }
++
+       /* what hook do we belong to? */
+       for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+               if (!newinfo->hook_entry[i])
+@@ -748,13 +727,11 @@ ebt_check_entry(struct ebt_entry *e, str
+       t->u.target = target;
+       if (t->u.target == &ebt_standard_target) {
+               if (gap < sizeof(struct ebt_standard_target)) {
+-                      BUGPRINT("Standard target size too big\n");
+                       ret = -EFAULT;
+                       goto cleanup_watchers;
+               }
+               if (((struct ebt_standard_target *)t)->verdict <
+                  -NUM_STANDARD_TARGETS) {
+-                      BUGPRINT("Invalid standard target\n");
+                       ret = -EFAULT;
+                       goto cleanup_watchers;
+               }
+@@ -813,10 +790,9 @@ static int check_chainloops(const struct
+               if (strcmp(t->u.name, EBT_STANDARD_TARGET))
+                       goto letscontinue;
+               if (e->target_offset + sizeof(struct ebt_standard_target) >
+-                 e->next_offset) {
+-                      BUGPRINT("Standard target size too big\n");
++                 e->next_offset)
+                       return -1;
+-              }
++
+               verdict = ((struct ebt_standard_target *)t)->verdict;
+               if (verdict >= 0) { /* jump to another chain */
+                       struct ebt_entries *hlp2 =
+@@ -825,14 +801,12 @@ static int check_chainloops(const struct
+                               if (hlp2 == cl_s[i].cs.chaininfo)
+                                       break;
+                       /* bad destination or loop */
+-                      if (i == udc_cnt) {
+-                              BUGPRINT("bad destination\n");
++                      if (i == udc_cnt)
+                               return -1;
+-                      }
+-                      if (cl_s[i].cs.n) {
+-                              BUGPRINT("loop\n");
++
++                      if (cl_s[i].cs.n)
+                               return -1;
+-                      }
++
+                       if (cl_s[i].hookmask & (1 << hooknr))
+                               goto letscontinue;
+                       /* this can't be 0, so the loop test is correct */
+@@ -865,24 +839,21 @@ static int translate_table(struct net *n
+       i = 0;
+       while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
+               i++;
+-      if (i == NF_BR_NUMHOOKS) {
+-              BUGPRINT("No valid hooks specified\n");
++      if (i == NF_BR_NUMHOOKS)
+               return -EINVAL;
+-      }
+-      if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
+-              BUGPRINT("Chains don't start at beginning\n");
++
++      if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)
+               return -EINVAL;
+-      }
++
+       /* make sure chains are ordered after each other in same order
+        * as their corresponding hooks
+        */
+       for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
+               if (!newinfo->hook_entry[j])
+                       continue;
+-              if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
+-                      BUGPRINT("Hook order must be followed\n");
++              if (newinfo->hook_entry[j] <= newinfo->hook_entry[i])
+                       return -EINVAL;
+-              }
++
+               i = j;
+       }
+@@ -900,15 +871,11 @@ static int translate_table(struct net *n
+       if (ret != 0)
+               return ret;
+-      if (i != j) {
+-              BUGPRINT("nentries does not equal the nr of entries in the "
+-                       "(last) chain\n");
++      if (i != j)
+               return -EINVAL;
+-      }
+-      if (k != newinfo->nentries) {
+-              BUGPRINT("Total nentries is wrong\n");
++
++      if (k != newinfo->nentries)
+               return -EINVAL;
+-      }
+       /* get the location of the udc, put them in an array
+        * while we're at it, allocate the chainstack
+@@ -942,7 +909,6 @@ static int translate_table(struct net *n
+                  ebt_get_udc_positions, newinfo, &i, cl_s);
+               /* sanity check */
+               if (i != udc_cnt) {
+-                      BUGPRINT("i != udc_cnt\n");
+                       vfree(cl_s);
+                       return -EFAULT;
+               }
+@@ -1042,7 +1008,6 @@ static int do_replace_finish(struct net
+               goto free_unlock;
+       if (repl->num_counters && repl->num_counters != t->private->nentries) {
+-              BUGPRINT("Wrong nr. of counters requested\n");
+               ret = -EINVAL;
+               goto free_unlock;
+       }
+@@ -1118,15 +1083,12 @@ static int do_replace(struct net *net, c
+       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+               return -EFAULT;
+-      if (len != sizeof(tmp) + tmp.entries_size) {
+-              BUGPRINT("Wrong len argument\n");
++      if (len != sizeof(tmp) + tmp.entries_size)
+               return -EINVAL;
+-      }
+-      if (tmp.entries_size == 0) {
+-              BUGPRINT("Entries_size never zero\n");
++      if (tmp.entries_size == 0)
+               return -EINVAL;
+-      }
++
+       /* overflow check */
+       if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
+                       NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+@@ -1153,7 +1115,6 @@ static int do_replace(struct net *net, c
+       }
+       if (copy_from_user(
+          newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
+-              BUGPRINT("Couldn't copy entries from userspace\n");
+               ret = -EFAULT;
+               goto free_entries;
+       }
+@@ -1194,10 +1155,8 @@ int ebt_register_table(struct net *net,
+       if (input_table == NULL || (repl = input_table->table) == NULL ||
+           repl->entries == NULL || repl->entries_size == 0 ||
+-          repl->counters != NULL || input_table->private != NULL) {
+-              BUGPRINT("Bad table data for ebt_register_table!!!\n");
++          repl->counters != NULL || input_table->private != NULL)
+               return -EINVAL;
+-      }
+       /* Don't add one table to multiple lists. */
+       table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
+@@ -1235,13 +1194,10 @@ int ebt_register_table(struct net *net,
+                               ((char *)repl->hook_entry[i] - repl->entries);
+       }
+       ret = translate_table(net, repl->name, newinfo);
+-      if (ret != 0) {
+-              BUGPRINT("Translate_table failed\n");
++      if (ret != 0)
+               goto free_chainstack;
+-      }
+       if (table->check && table->check(newinfo, table->valid_hooks)) {
+-              BUGPRINT("The table doesn't like its own initial data, lol\n");
+               ret = -EINVAL;
+               goto free_chainstack;
+       }
+@@ -1252,7 +1208,6 @@ int ebt_register_table(struct net *net,
+       list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
+               if (strcmp(t->name, table->name) == 0) {
+                       ret = -EEXIST;
+-                      BUGPRINT("Table name already exists\n");
+                       goto free_unlock;
+               }
+       }
+@@ -1320,7 +1275,6 @@ static int do_update_counters(struct net
+               goto free_tmp;
+       if (num_counters != t->private->nentries) {
+-              BUGPRINT("Wrong nr of counters\n");
+               ret = -EINVAL;
+               goto unlock_mutex;
+       }
+@@ -1447,10 +1401,8 @@ static int copy_counters_to_user(struct
+       if (num_counters == 0)
+               return 0;
+-      if (num_counters != nentries) {
+-              BUGPRINT("Num_counters wrong\n");
++      if (num_counters != nentries)
+               return -EINVAL;
+-      }
+       counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
+       if (!counterstmp)
+@@ -1496,15 +1448,11 @@ static int copy_everything_to_user(struc
+          (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
+               return -EINVAL;
+-      if (tmp.nentries != nentries) {
+-              BUGPRINT("Nentries wrong\n");
++      if (tmp.nentries != nentries)
+               return -EINVAL;
+-      }
+-      if (tmp.entries_size != entries_size) {
+-              BUGPRINT("Wrong size\n");
++      if (tmp.entries_size != entries_size)
+               return -EINVAL;
+-      }
+       ret = copy_counters_to_user(t, oldcounters, tmp.counters,
+                                       tmp.num_counters, nentries);
+@@ -1576,7 +1524,6 @@ static int do_ebt_get_ctl(struct sock *s
+               }
+               mutex_unlock(&ebt_mutex);
+               if (copy_to_user(user, &tmp, *len) != 0) {
+-                      BUGPRINT("c2u Didn't work\n");
+                       ret = -EFAULT;
+                       break;
+               }
diff --git a/queue-5.0/rdma-cma-rollback-source-ip-address-if-failing-to-acquire-device.patch b/queue-5.0/rdma-cma-rollback-source-ip-address-if-failing-to-acquire-device.patch
new file mode 100644 (file)
index 0000000..4a99e55
--- /dev/null
@@ -0,0 +1,61 @@
+From 5fc01fb846bce8fa6d5f95e2625b8ce0f8e86810 Mon Sep 17 00:00:00 2001
+From: Myungho Jung <mhjungk@gmail.com>
+Date: Wed, 9 Jan 2019 22:27:31 -0800
+Subject: RDMA/cma: Rollback source IP address if failing to acquire device
+
+From: Myungho Jung <mhjungk@gmail.com>
+
+commit 5fc01fb846bce8fa6d5f95e2625b8ce0f8e86810 upstream.
+
+If cma_acquire_dev_by_src_ip() returns error in addr_handler(), the
+device state changes back to RDMA_CM_ADDR_BOUND but the resolved source
+IP address is still left. After that, if rdma_destroy_id() is called
+after rdma_listen(), the device is freed without removed from
+listen_any_list in cma_cancel_operation(). Revert to the previous IP
+address if acquiring device fails.
+
+Reported-by: syzbot+f3ce716af730c8f96637@syzkaller.appspotmail.com
+Signed-off-by: Myungho Jung <mhjungk@gmail.com>
+Reviewed-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/cma.c |   13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2966,13 +2966,22 @@ static void addr_handler(int status, str
+ {
+       struct rdma_id_private *id_priv = context;
+       struct rdma_cm_event event = {};
++      struct sockaddr *addr;
++      struct sockaddr_storage old_addr;
+       mutex_lock(&id_priv->handler_mutex);
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+                          RDMA_CM_ADDR_RESOLVED))
+               goto out;
+-      memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
++      /*
++       * Store the previous src address, so that if we fail to acquire
++       * matching rdma device, old address can be restored back, which helps
++       * to cancel the cma listen operation correctly.
++       */
++      addr = cma_src_addr(id_priv);
++      memcpy(&old_addr, addr, rdma_addr_size(addr));
++      memcpy(addr, src_addr, rdma_addr_size(src_addr));
+       if (!status && !id_priv->cma_dev) {
+               status = cma_acquire_dev_by_src_ip(id_priv);
+               if (status)
+@@ -2983,6 +2992,8 @@ static void addr_handler(int status, str
+       }
+       if (status) {
++              memcpy(addr, &old_addr,
++                     rdma_addr_size((struct sockaddr *)&old_addr));
+               if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+                                  RDMA_CM_ADDR_BOUND))
+                       goto out;
index cbfb4d0566bb3eaa233e23727d8b82bdd4eefc0f..2b9a6934b2fba0ea682d9add9b1abb689e2e69e3 100644 (file)
@@ -33,3 +33,18 @@ alsa-ac97-fix-of-node-refcount-unbalance.patch
 ext4-fix-null-pointer-dereference-while-journal-is-aborted.patch
 ext4-fix-data-corruption-caused-by-unaligned-direct-aio.patch
 ext4-brelse-all-indirect-buffer-in-ext4_ind_remove_space.patch
+media-v4l2-ctrls.c-uvc-zero-v4l2_event.patch
+bluetooth-hci_uart-check-if-socket-buffer-is-err_ptr-in-h4_recv_buf.patch
+bluetooth-fix-decrementing-reference-count-twice-in-releasing-socket.patch
+bluetooth-hci_ldisc-initialize-hci_dev-before-open.patch
+bluetooth-hci_ldisc-postpone-hci_uart_proto_ready-bit-set-in-hci_uart_set_proto.patch
+drm-vkms-fix-flush_work-without-init_work.patch
+rdma-cma-rollback-source-ip-address-if-failing-to-acquire-device.patch
+f2fs-fix-to-avoid-deadlock-of-atomic-file-operations.patch
+aio-simplify-and-fix-fget-fput-for-io_submit.patch
+netfilter-ebtables-remove-bugprint-messages.patch
+loop-access-lo_backing_file-only-when-the-loop-device-is-lo_bound.patch
+x86-unwind-handle-null-pointer-calls-better-in-frame-unwinder.patch
+x86-unwind-add-hardcoded-orc-entry-for-null.patch
+locking-lockdep-add-debug_locks-check-in-__lock_downgrade.patch
+mm-mempolicy-fix-uninit-memory-access.patch
diff --git a/queue-5.0/x86-unwind-add-hardcoded-orc-entry-for-null.patch b/queue-5.0/x86-unwind-add-hardcoded-orc-entry-for-null.patch
new file mode 100644 (file)
index 0000000..28d20a2
--- /dev/null
@@ -0,0 +1,91 @@
+From ac5ceccce5501e43d217c596e4ee859f2a3fef79 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Fri, 1 Mar 2019 04:12:01 +0100
+Subject: x86/unwind: Add hardcoded ORC entry for NULL
+
+From: Jann Horn <jannh@google.com>
+
+commit ac5ceccce5501e43d217c596e4ee859f2a3fef79 upstream.
+
+When the ORC unwinder is invoked for an oops caused by IP==0,
+it currently has no idea what to do because there is no debug information
+for the stack frame of NULL.
+
+But if RIP is NULL, it is very likely that the last successfully executed
+instruction was an indirect CALL/JMP, and it is possible to unwind out in
+the same way as for the first instruction of a normal function. Hardcode
+a corresponding ORC entry.
+
+With an artificially-added NULL call in prctl_set_seccomp(), before this
+patch, the trace is:
+
+Call Trace:
+ ? __x64_sys_prctl+0x402/0x680
+ ? __ia32_sys_prctl+0x6e0/0x6e0
+ ? __do_page_fault+0x457/0x620
+ ? do_syscall_64+0x6d/0x160
+ ? entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+After this patch, the trace looks like this:
+
+Call Trace:
+ __x64_sys_prctl+0x402/0x680
+ ? __ia32_sys_prctl+0x6e0/0x6e0
+ ? __do_page_fault+0x457/0x620
+ do_syscall_64+0x6d/0x160
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+prctl_set_seccomp() still doesn't show up in the trace because for some
+reason, tail call optimization is only disabled in builds that use the
+frame pointer unwinder.
+
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: syzbot <syzbot+ca95b2b7aef9e7cbd6ab@syzkaller.appspotmail.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Michal Marek <michal.lkml@markovi.net>
+Cc: linux-kbuild@vger.kernel.org
+Link: https://lkml.kernel.org/r/20190301031201.7416-2-jannh@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/unwind_orc.c |   17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -113,6 +113,20 @@ static struct orc_entry *orc_ftrace_find
+ }
+ #endif
++/*
++ * If we crash with IP==0, the last successfully executed instruction
++ * was probably an indirect function call with a NULL function pointer,
++ * and we don't have unwind information for NULL.
++ * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
++ * pointer into its parent and then continue normally from there.
++ */
++static struct orc_entry null_orc_entry = {
++      .sp_offset = sizeof(long),
++      .sp_reg = ORC_REG_SP,
++      .bp_reg = ORC_REG_UNDEFINED,
++      .type = ORC_TYPE_CALL
++};
++
+ static struct orc_entry *orc_find(unsigned long ip)
+ {
+       static struct orc_entry *orc;
+@@ -120,6 +134,9 @@ static struct orc_entry *orc_find(unsign
+       if (!orc_init)
+               return NULL;
++      if (ip == 0)
++              return &null_orc_entry;
++
+       /* For non-init vmlinux addresses, use the fast lookup table: */
+       if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
+               unsigned int idx, start, stop;
diff --git a/queue-5.0/x86-unwind-handle-null-pointer-calls-better-in-frame-unwinder.patch b/queue-5.0/x86-unwind-handle-null-pointer-calls-better-in-frame-unwinder.patch
new file mode 100644 (file)
index 0000000..c301e8a
--- /dev/null
@@ -0,0 +1,123 @@
+From f4f34e1b82eb4219d8eaa1c7e2e17ca219a6a2b5 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Fri, 1 Mar 2019 04:12:00 +0100
+Subject: x86/unwind: Handle NULL pointer calls better in frame unwinder
+
+From: Jann Horn <jannh@google.com>
+
+commit f4f34e1b82eb4219d8eaa1c7e2e17ca219a6a2b5 upstream.
+
+When the frame unwinder is invoked for an oops caused by a call to NULL, it
+currently skips the parent function because BP still points to the parent's
+stack frame; the (nonexistent) current function only has the first half of
+a stack frame, and BP doesn't point to it yet.
+
+Add a special case for IP==0 that calculates a fake BP from SP, then uses
+the real BP for the next frame.
+
+Note that this handles first_frame specially: Return information about the
+parent function as long as the saved IP is >=first_frame, even if the fake
+BP points below it.
+
+With an artificially-added NULL call in prctl_set_seccomp(), before this
+patch, the trace is:
+
+Call Trace:
+ ? prctl_set_seccomp+0x3a/0x50
+ __x64_sys_prctl+0x457/0x6f0
+ ? __ia32_sys_prctl+0x750/0x750
+ do_syscall_64+0x72/0x160
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+After this patch, the trace is:
+
+Call Trace:
+ prctl_set_seccomp+0x3a/0x50
+ __x64_sys_prctl+0x457/0x6f0
+ ? __ia32_sys_prctl+0x750/0x750
+ do_syscall_64+0x72/0x160
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: syzbot <syzbot+ca95b2b7aef9e7cbd6ab@syzkaller.appspotmail.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Michal Marek <michal.lkml@markovi.net>
+Cc: linux-kbuild@vger.kernel.org
+Link: https://lkml.kernel.org/r/20190301031201.7416-1-jannh@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/unwind.h  |    6 ++++++
+ arch/x86/kernel/unwind_frame.c |   25 ++++++++++++++++++++++---
+ 2 files changed, 28 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/unwind.h
++++ b/arch/x86/include/asm/unwind.h
+@@ -23,6 +23,12 @@ struct unwind_state {
+ #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
+       bool got_irq;
+       unsigned long *bp, *orig_sp, ip;
++      /*
++       * If non-NULL: The current frame is incomplete and doesn't contain a
++       * valid BP. When looking for the next frame, use this instead of the
++       * non-existent saved BP.
++       */
++      unsigned long *next_bp;
+       struct pt_regs *regs;
+ #else
+       unsigned long *sp;
+--- a/arch/x86/kernel/unwind_frame.c
++++ b/arch/x86/kernel/unwind_frame.c
+@@ -320,10 +320,14 @@ bool unwind_next_frame(struct unwind_sta
+       }
+       /* Get the next frame pointer: */
+-      if (state->regs)
++      if (state->next_bp) {
++              next_bp = state->next_bp;
++              state->next_bp = NULL;
++      } else if (state->regs) {
+               next_bp = (unsigned long *)state->regs->bp;
+-      else
++      } else {
+               next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
++      }
+       /* Move to the next frame if it's safe: */
+       if (!update_stack_state(state, next_bp))
+@@ -398,6 +402,21 @@ void __unwind_start(struct unwind_state
+       bp = get_frame_pointer(task, regs);
++      /*
++       * If we crash with IP==0, the last successfully executed instruction
++       * was probably an indirect function call with a NULL function pointer.
++       * That means that SP points into the middle of an incomplete frame:
++       * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we
++       * would have written a frame pointer if we hadn't crashed.
++       * Pretend that the frame is complete and that BP points to it, but save
++       * the real BP so that we can use it when looking for the next frame.
++       */
++      if (regs && regs->ip == 0 &&
++          (unsigned long *)kernel_stack_pointer(regs) >= first_frame) {
++              state->next_bp = bp;
++              bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1;
++      }
++
+       /* Initialize stack info and make sure the frame data is accessible: */
+       get_stack_info(bp, state->task, &state->stack_info,
+                      &state->stack_mask);
+@@ -410,7 +429,7 @@ void __unwind_start(struct unwind_state
+        */
+       while (!unwind_done(state) &&
+              (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+-                      state->bp < first_frame))
++                      (state->next_bp == NULL && state->bp < first_frame)))
+               unwind_next_frame(state);
+ }
+ EXPORT_SYMBOL_GPL(__unwind_start);